From 3a163e10e3de1eec93607cba93a67a805cfd08db Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Thu, 28 Sep 2017 09:59:05 -0400 Subject: [PATCH 01/27] Update hack/godep-restore.sh to match fork names --- hack/godep-restore.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hack/godep-restore.sh b/hack/godep-restore.sh index a3ea45bb80f7..cff15f29bde9 100755 --- a/hack/godep-restore.sh +++ b/hack/godep-restore.sh @@ -55,12 +55,12 @@ pin-godep 'v79' preload-remote "github.com/openshift" "origin" "github.com/openshift" "origin" # this looks goofy, but if you are not in GOPATH you need to pull origin explicitly preload-remote "k8s.io" "kubernetes" "github.com/openshift" "kubernetes" preload-remote "github.com/docker" "distribution" "github.com/openshift" "docker-distribution" -preload-remote "github.com/skynetservices" "skydns" "github.com/openshift" "skydns" -preload-remote "github.com/coreos" "etcd" "github.com/openshift" "etcd" -preload-remote "github.com/emicklei" "go-restful" "github.com/openshift" "go-restful" -preload-remote "github.com/cloudflare" "cfssl" "github.com/openshift" "cfssl" -preload-remote "github.com/google" "certificate-transparency" "github.com/openshift" "certificate-transparency" -preload-remote "github.com/google" "cadvisor" "github.com/openshift" "cadvisor" +preload-remote "github.com/skynetservices" "skydns" "github.com/openshift" "skynetservices-skydns" +preload-remote "github.com/coreos" "etcd" "github.com/openshift" "coreos-etcd" +preload-remote "github.com/emicklei" "go-restful" "github.com/openshift" "emicklei-go-restful-swagger12" +preload-remote "github.com/cloudflare" "cfssl" "github.com/openshift" "cloudflare-cfssl" +preload-remote "github.com/google" "certificate-transparency" "github.com/openshift" "google-certificate-transparency" +preload-remote "github.com/google" "cadvisor" "github.com/openshift" "google-cadvisor" # preload any odd-ball commits # kube e2e test dep From 
771a804ecbfe7d349e5a78eb56ad80fc0310772a Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 13:09:34 +0200 Subject: [PATCH 02/27] bump(github.com/fatih/structs): 7e5a8eef611ee84dd359503f3969f80df4c50723 --- Godeps/Godeps.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 78e35e0e3621..6d238b749c7f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1567,6 +1567,11 @@ "ImportPath": "github.com/fatih/camelcase", "Rev": "f6a740d52f961c60348ebb109adde9f4635d7540" }, + { + "ImportPath": "github.com/fatih/structs", + "Comment": "v1.0-4-g7e5a8ee", + "Rev": "7e5a8eef611ee84dd359503f3969f80df4c50723" + }, { "ImportPath": "github.com/fsnotify/fsnotify", "Comment": "v1.3.1-1-gf12c623", From 25e7dfd56dc32fc85b070d07206561f88877e6d6 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 13:37:23 +0200 Subject: [PATCH 03/27] bump(github.com/containers/image): dbd0a4cee2480da39048095a326506ae114d635a --- Godeps/Godeps.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 6d238b749c7f..450749304e51 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -590,6 +590,10 @@ "Comment": "v0.5.2", "Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e" }, + { + "ImportPath": "github.com/containers/image/docker", + "Rev": "dbd0a4cee2480da39048095a326506ae114d635a" + }, { "ImportPath": "github.com/containers/image/docker/policyconfiguration", "Rev": "dbd0a4cee2480da39048095a326506ae114d635a" From 24501f44b4ee506015b40ea0b6b34bb80b17d1c2 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 13:48:45 +0200 Subject: [PATCH 04/27] bump(github.com/docker/distribution): 1e2bbed6e09c6c8047f52af965a6e301f346d04e --- Godeps/Godeps.json | 222 ++++++++++++++++++++++----------------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 450749304e51..079ae842581e 100644 --- 
a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1115,317 +1115,322 @@ }, { "ImportPath": "github.com/docker/distribution", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/configuration", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/context", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/digest", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/health", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/health/checks", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/manifest", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/manifest/manifestlist", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/manifest/schema1", - "Comment": "v2.6.2", - "Rev": 
"48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/manifest/schema2", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/notifications", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/api/errcode", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/api/v2", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/auth", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/auth/htpasswd", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/auth/token", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": 
"github.com/docker/distribution/registry/client", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/client/auth", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" + }, + { + "ImportPath": "github.com/docker/distribution/registry/client/auth/challenge", + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/client/transport", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/handlers", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/middleware/registry", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/middleware/repository", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/proxy", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/proxy/scheduler", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + 
"Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/cache", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/cache/memory", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/cache/redis", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/azure", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/base", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/factory", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": 
"github.com/docker/distribution/registry/storage/driver/filesystem", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/gcs", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/inmemory", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/middleware", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/oss", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/s3-aws", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/registry/storage/driver/swift", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": 
"github.com/docker/distribution/uuid", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/distribution/version", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + "Comment": "v2.6.2-7-g729840c", + "Rev": "1e2bbed6e09c6c8047f52af965a6e301f346d04e" }, { "ImportPath": "github.com/docker/docker/builder/dockerfile/command", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/builder/dockerfile/parser", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/cliconfig", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/opts", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/archive", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/fileutils", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/homedir", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/idtools", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "v1.11.2-1-gb80676c6a", + 
"Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/jsonlog", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/longpath", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/pools", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/promise", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/symlink", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "v1.11.2-1-gb80676c6a", + "Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "v1.11.2-1-gb80676c6a", + 
"Comment": "v1.11.2-1-g9283250", "Rev": "b9f10c951893f9a00865890a5232e85d770c1087" }, { @@ -10368,11 +10373,6 @@ "ImportPath": "vbom.ml/util/sortorder", "Rev": "db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394" }, - { - "ImportPath": "github.com/docker/distribution/registry/client/auth/challenge", - "Comment": "v2.6.2", - "Rev": "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" - }, { "ImportPath": "github.com/docker/distribution/vendor/golang.org/x/oauth2", "Comment": "v2.6.2", From b2d9319ae269e02a272758295b08d037d04d3440 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 14:01:22 +0200 Subject: [PATCH 05/27] bump(github.com/google/cadvisor): c683567ed073eb6bcab81cccee79cd64a0e33811 --- Godeps/Godeps.json | 169 +++++++++++++++++++++++---------------------- 1 file changed, 87 insertions(+), 82 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 079ae842581e..479867a85b07 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1972,208 +1972,213 @@ }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/client/v2", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.26.1", - "Rev": 
"d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" + }, + { + "ImportPath": "github.com/google/cadvisor/container/crio", + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.26.1", - "Rev": 
"d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.26.1", - "Rev": 
"d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": 
"v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/utils/tail", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": 
"github.com/google/cadvisor/version", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.26.1", - "Rev": "d19cc94b760cd8f150a0a5d95b404dec39a121a1" + "Comment": "v0.26.1-7-gc683567", + "Rev": "c683567ed073eb6bcab81cccee79cd64a0e33811" }, { "ImportPath": "github.com/google/certificate-transparency/go", From 450a2e4d612e13a418773ece67e4a3696e42c552 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 21:42:01 +0200 Subject: [PATCH 06/27] bump(github.com/emicklei/go-restful-swagger12): 885875a92c2ab7d6222e257e41f6ca2c1f010b4e --- Godeps/Godeps.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 479867a85b07..2bccff4c6506 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1557,7 +1557,7 @@ { "ImportPath": "github.com/emicklei/go-restful-swagger12", "Comment": "1.0.1", - "Rev": "dcef7f55730566d41eae5db10e7d6981829720f6" + "Rev": "885875a92c2ab7d6222e257e41f6ca2c1f010b4e" }, { "ImportPath": "github.com/emicklei/go-restful/log", From cc2b9f16f62c2e6f9c4f5f1d6344171ef7d8450d Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 15:30:21 +0200 Subject: [PATCH 07/27] bump(k8s.io/kubernetes): a08f5eeb6246134f4ae5443c0593d72fd057ea7c --- Godeps/Godeps.json | 5362 +++++++++-------- .../arm/compute/availabilitysets.go | 68 +- .../azure-sdk-for-go/arm/compute/client.go | 9 +- .../azure-sdk-for-go/arm/compute/images.go | 463 ++ .../azure-sdk-for-go/arm/compute/models.go | 224 +- .../compute/{usageoperations.go => usage.go} | 49 +- .../azure-sdk-for-go/arm/compute/version.go | 20 +- .../compute/virtualmachineextensionimages.go | 33 +- .../arm/compute/virtualmachineextensions.go | 177 +- .../arm/compute/virtualmachineimages.go | 47 +- .../arm/compute/virtualmachines.go | 748 ++- 
.../arm/compute/virtualmachinescalesets.go | 843 ++- .../arm/compute/virtualmachinescalesetvms.go | 538 +- .../arm/compute/virtualmachinesizes.go | 11 +- .../arm/containerregistry/client.go | 12 +- .../arm/containerregistry/models.go | 137 +- .../arm/containerregistry/operations.go | 124 + .../arm/containerregistry/registries.go | 422 +- .../arm/containerregistry/version.go | 20 +- .../Azure/azure-sdk-for-go/arm/disk/client.go | 53 + .../Azure/azure-sdk-for-go/arm/disk/disks.go | 728 +++ .../Azure/azure-sdk-for-go/arm/disk/models.go | 278 + .../azure-sdk-for-go/arm/disk/snapshots.go | 733 +++ .../azure-sdk-for-go/arm/disk/version.go | 29 + .../arm/network/applicationgateways.go | 402 +- .../arm/network/bgpservicecommunities.go | 127 + .../azure-sdk-for-go/arm/network/client.go | 24 +- .../expressroutecircuitauthorizations.go | 166 +- .../network/expressroutecircuitpeerings.go | 152 +- .../arm/network/expressroutecircuits.go | 387 +- .../network/expressrouteserviceproviders.go | 18 +- .../arm/network/interfaces.go | 346 +- .../arm/network/loadbalancers.go | 170 +- .../arm/network/localnetworkgateways.go | 187 +- .../azure-sdk-for-go/arm/network/models.go | 1028 +++- .../arm/network/packetcaptures.go | 526 ++ .../arm/network/publicipaddresses.go | 188 +- .../arm/network/routefilterrules.go | 468 ++ .../arm/network/routefilters.go | 535 ++ .../azure-sdk-for-go/arm/network/routes.go | 148 +- .../arm/network/routetables.go | 166 +- .../arm/network/securitygroups.go | 160 +- .../arm/network/securityrules.go | 159 +- .../azure-sdk-for-go/arm/network/subnets.go | 166 +- .../azure-sdk-for-go/arm/network/usages.go | 17 +- .../azure-sdk-for-go/arm/network/version.go | 20 +- .../virtualnetworkgatewayconnections.go | 357 +- .../arm/network/virtualnetworkgateways.go | 483 +- .../arm/network/virtualnetworkpeerings.go | 135 +- .../arm/network/virtualnetworks.go | 177 +- .../azure-sdk-for-go/arm/network/watchers.go | 1131 ++++ .../azure-sdk-for-go/arm/storage/accounts.go | 449 +- 
.../azure-sdk-for-go/arm/storage/client.go | 11 +- .../azure-sdk-for-go/arm/storage/models.go | 229 +- .../storage/{usageoperations.go => usage.go} | 39 +- .../azure-sdk-for-go/arm/storage/version.go | 20 +- .../Azure/azure-sdk-for-go/storage/README.md | 5 - .../azure-sdk-for-go/storage/appendblob.go | 70 + .../storage/appendblob_test.go | 126 + .../azure-sdk-for-go/storage/authorization.go | 227 + .../storage/authorization_test.go | 230 + .../Azure/azure-sdk-for-go/storage/blob.go | 1657 ++--- .../azure-sdk-for-go/storage/blob_test.go | 1644 +---- .../azure-sdk-for-go/storage/blobsasuri.go | 106 + .../storage/blobsasuri_test.go | 185 + .../storage/blobserviceclient.go | 95 + .../azure-sdk-for-go/storage/blockblob.go | 240 + .../storage/blockblob_test.go | 134 + .../Azure/azure-sdk-for-go/storage/client.go | 606 +- .../azure-sdk-for-go/storage/client_test.go | 424 +- .../azure-sdk-for-go/storage/container.go | 453 ++ .../storage/container_test.go | 554 ++ .../azure-sdk-for-go/storage/copyblob.go | 223 + .../azure-sdk-for-go/storage/copyblob_test.go | 171 + .../azure-sdk-for-go/storage/directory.go | 222 + .../storage/directory_test.go | 170 + .../Azure/azure-sdk-for-go/storage/entity.go | 439 ++ .../azure-sdk-for-go/storage/entity_test.go | 550 ++ .../Azure/azure-sdk-for-go/storage/file.go | 982 +-- .../azure-sdk-for-go/storage/file_test.go | 650 +- .../storage/fileserviceclient.go | 324 + .../azure-sdk-for-go/storage/leaseblob.go | 187 + .../storage/leaseblob_test.go | 211 + .../Azure/azure-sdk-for-go/storage/message.go | 153 + .../azure-sdk-for-go/storage/message_test.go | 79 + .../Azure/azure-sdk-for-go/storage/odata.go | 33 + .../azure-sdk-for-go/storage/pageblob.go | 189 + .../azure-sdk-for-go/storage/pageblob_test.go | 179 + .../Azure/azure-sdk-for-go/storage/queue.go | 557 +- .../azure-sdk-for-go/storage/queue_test.go | 353 +- .../storage/queueserviceclient.go | 28 + .../Azure/azure-sdk-for-go/storage/share.go | 202 + 
.../azure-sdk-for-go/storage/share_test.go | 207 + .../azure-sdk-for-go/storage/storagepolicy.go | 47 + .../storage/storageservice.go | 117 + .../storage/storageservice_test.go | 85 + .../Azure/azure-sdk-for-go/storage/table.go | 413 +- .../azure-sdk-for-go/storage/table_batch.go | 302 + .../storage/table_batch_test.go | 216 + .../storage/table_entities.go | 357 -- .../azure-sdk-for-go/storage/table_test.go | 350 +- .../storage/tableserviceclient.go | 190 + .../Azure/azure-sdk-for-go/storage/util.go | 120 +- .../azure-sdk-for-go/storage/util_test.go | 115 +- .../Azure/azure-sdk-for-go/storage/version.go | 5 + ...helpers_test.go => parser_test_helpers.go} | 0 ...ities_test.go => parser_test_utilities.go} | 0 ..._handler_test.go => test_event_handler.go} | 0 .../Azure/go-autorest/autorest/adal/README.md | 253 + .../Azure/go-autorest/autorest/adal/config.go | 51 + .../go-autorest/autorest/adal/config_test.go | 30 + .../autorest/{azure => adal}/devicetoken.go | 131 +- .../{azure => adal}/devicetoken_test.go | 133 +- .../autorest/{azure => adal}/persist.go | 2 +- .../autorest/{azure => adal}/persist_test.go | 2 +- .../Azure/go-autorest/autorest/adal/sender.go | 46 + .../autorest/{azure => adal}/token.go | 175 +- .../autorest/{azure => adal}/token_test.go | 327 +- .../go-autorest/autorest/authorization.go | 57 + .../autorest/authorization_test.go | 137 + .../Azure/go-autorest/autorest/autorest.go | 1 + .../Azure/go-autorest/autorest/azure/async.go | 13 +- .../go-autorest/autorest/azure/async_test.go | 11 +- .../go-autorest/autorest/azure/config.go | 13 - .../autorest/azure/environments.go | 44 +- .../autorest/azure/environments_test.go | 34 +- .../Azure/go-autorest/autorest/client.go | 41 +- .../Azure/go-autorest/autorest/client_test.go | 31 +- .../go-autorest/autorest/date/unixtime.go | 109 + .../autorest/date/unixtime_test.go | 267 + .../Azure/go-autorest/autorest/preparer.go | 25 +- .../go-autorest/autorest/preparer_test.go | 60 +- 
.../Azure/go-autorest/autorest/responder.go | 21 + .../go-autorest/autorest/responder_test.go | 60 + .../Azure/go-autorest/autorest/sender.go | 3 +- .../Azure/go-autorest/autorest/sender_test.go | 33 + .../Azure/go-autorest/autorest/version.go | 29 +- .../go-autorest/autorest/version_test.go | 13 - vendor/github.com/coreos/go-oidc/http/http.go | 7 +- .../coreos/go-oidc/http/http_test.go | 7 + .../coreos/go-oidc/oidc/provider_test.go | 28 +- vendor/github.com/satori/uuid/.travis.yml | 22 + vendor/github.com/satori/uuid/LICENSE | 20 + vendor/github.com/satori/uuid/README.md | 65 + .../github.com/satori/uuid/benchmarks_test.go | 123 + vendor/github.com/satori/uuid/uuid.go | 481 ++ vendor/github.com/satori/uuid/uuid_test.go | 633 ++ vendor/gopkg.in/gcfg.v1/doc.go | 27 + vendor/gopkg.in/gcfg.v1/errors.go | 41 + vendor/gopkg.in/gcfg.v1/read.go | 94 +- vendor/gopkg.in/gcfg.v1/read_test.go | 44 +- vendor/gopkg.in/gcfg.v1/set.go | 56 +- vendor/gopkg.in/warnings.v0/LICENSE | 24 + vendor/gopkg.in/warnings.v0/README | 74 + vendor/gopkg.in/warnings.v0/warnings.go | 191 + vendor/gopkg.in/warnings.v0/warnings_test.go | 82 + vendor/k8s.io/kubernetes/README.md | 2 +- .../api/swagger-spec/apps_v1beta1.json | 16 +- .../api/swagger-spec/extensions_v1beta1.json | 40 +- .../storage.authorization.k8s.io_v1beta1.json | 1 - .../kubernetes/api/swagger-spec/v1.json | 20 +- .../kubernetes/cmd/kube-apiserver/app/BUILD | 3 +- .../cmd/kube-apiserver/app/aggregator.go | 90 +- .../app/options/options_test.go | 1 + .../cmd/kube-apiserver/app/server.go | 13 +- .../kubernetes/cmd/kube-proxy/app/BUILD | 1 + .../kubernetes/cmd/kube-proxy/app/server.go | 8 +- .../cmd/kubeadm/app/apis/kubeadm/types.go | 2 + .../app/apis/kubeadm/v1alpha1/types.go | 2 + .../app/apis/kubeadm/validation/validation.go | 33 + .../cmd/kubeadm/app/cmd/defaults.go | 4 + .../kubernetes/cmd/kubeadm/app/cmd/init.go | 23 +- .../kubernetes/cmd/kubeadm/app/cmd/join.go | 57 +- .../kubernetes/cmd/kubeadm/app/cmd/reset.go | 2 +- 
.../kubernetes/cmd/kubeadm/app/cmd/token.go | 6 + .../cmd/kubeadm/app/discovery/token/token.go | 2 +- .../cmd/kubeadm/app/master/manifests.go | 7 - .../cmd/kubeadm/app/phases/addons/addons.go | 10 +- .../app/phases/apiconfig/clusterroles.go | 2 +- .../app/phases/apiconfig/setupmaster.go | 11 +- .../cmd/kubeadm/app/phases/certs/certs.go | 7 +- .../app/phases/kubeconfig/kubeconfig.go | 8 +- .../cmd/kubeadm/app/preflight/checks.go | 20 +- .../kubernetes/cmd/kubelet/app/server.go | 7 +- vendor/k8s.io/kubernetes/examples/README.md | 4 +- .../kubernetes/examples/cockroachdb/demo.sh | 2 +- .../examples/guestbook-go/README.md | 4 +- .../kubernetes/examples/guestbook/README.md | 6 +- .../kubernetes/examples/javaee/mysql-pod.yaml | 18 +- .../examples/javaee/mysql-service.yaml | 8 +- .../examples/javaweb-tomcat-sidecar/README.md | 2 +- .../javaweb-tomcat-sidecar/javaweb-2.yaml | 3 +- .../javaweb-tomcat-sidecar/javaweb.yaml | 1 - .../k8s.io/kubernetes/examples/oms/README.md | 2 +- .../examples/openshift-origin/README.md | 10 +- .../examples/openshift-origin/create.sh | 2 +- .../openshift-origin/etcd-controller.yaml | 2 +- .../persistent-volume-provisioning/README.md | 2 +- .../quobyte/example-pod.yaml | 2 +- .../rbd/ceph-secret-user.yaml | 2 +- .../rbd/rbd-storage-class.yaml | 1 - .../phabricator/php-phabricator/run.sh | 1 - .../kubernetes/examples/phabricator/setup.sh | 1 - .../examples/phabricator/teardown.sh | 1 - .../examples/podsecuritypolicy/rbac/README.md | 14 +- .../podsecuritypolicy/rbac/policies.yaml | 1 - .../podsecuritypolicy/rbac/roles.yaml | 6 +- .../examples/selenium/selenium-hub-rc.yaml | 4 +- .../examples/selenium/selenium-hub-svc.yaml | 4 +- .../selenium/selenium-node-chrome-rc.yaml | 8 +- .../selenium/selenium-node-firefox-rc.yaml | 8 +- .../examples/selenium/selenium-test.py | 1 - .../spark/spark-worker-controller.yaml | 1 - .../examples/storage/cassandra/README.md | 2 +- .../cassandra/image/files/cassandra.yaml | 7 +- 
.../java/src/test/resources/cassandra.yaml | 2 +- .../examples/storage/hazelcast/README.md | 22 +- .../hazelcast/hazelcast-deployment.yaml | 18 +- .../storage/hazelcast/hazelcast-service.yaml | 2 +- .../examples/storage/mysql-galera/README.md | 10 +- .../storage/mysql-galera/image/Dockerfile | 8 +- .../mysql-galera/image/docker-entrypoint.sh | 40 +- .../storage/mysql-galera/pxc-node1.yaml | 14 +- .../storage/mysql-galera/pxc-node2.yaml | 16 +- .../storage/mysql-galera/pxc-node3.yaml | 16 +- .../storage/redis/image/redis-slave.conf | 2 +- .../examples/storage/redis/image/run.sh | 2 +- .../storage/redis/redis-controller.yaml | 1 - .../examples/storage/rethinkdb/gen-pod.sh | 2 +- .../examples/storage/rethinkdb/image/run.sh | 2 +- .../examples/storage/vitess/configure.sh | 1 - .../storage/vitess/create_test_table.sql | 1 - .../kubernetes/examples/storage/vitess/env.sh | 1 - .../vitess/etcd-controller-template.yaml | 1 - .../examples/storage/vitess/etcd-down.sh | 1 - .../storage/vitess/etcd-service-template.yaml | 1 - .../examples/storage/vitess/etcd-up.sh | 1 - .../storage/vitess/guestbook-controller.yaml | 1 - .../storage/vitess/guestbook-service.yaml | 1 - .../examples/storage/vitess/vitess-up.sh | 1 - .../vitess/vtctld-controller-template.yaml | 1 - .../storage/vitess/vtctld-service.yaml | 1 - .../examples/storage/vitess/vtctld-up.sh | 1 - .../vitess/vtgate-controller-template.yaml | 1 - .../storage/vitess/vtgate-service.yaml | 1 - .../examples/storage/vitess/vttablet-down.sh | 1 - .../storage/vitess/vttablet-pod-template.yaml | 1 - .../sysdig-cloud/sysdig-daemonset.yaml | 8 +- .../examples/sysdig-cloud/sysdig-rc.yaml | 8 +- .../examples/volumes/cephfs/README.md | 2 +- .../examples/volumes/cephfs/cephfs.yaml | 2 +- .../examples/volumes/fibre_channel/README.md | 6 +- .../examples/volumes/fibre_channel/fc.yaml | 2 +- .../examples/volumes/flocker/README.md | 2 +- .../examples/volumes/iscsi/README.md | 18 +- .../examples/volumes/iscsi/chap-secret.yaml | 2 +- 
.../examples/volumes/iscsi/iscsi-chap.yaml | 2 +- .../examples/volumes/nfs/nfs-data/run_nfs.sh | 2 +- .../examples/volumes/portworx/README.md | 4 +- .../examples/volumes/rbd/rbd-with-secret.json | 6 +- .../kubernetes/examples/volumes/rbd/rbd.json | 6 +- .../volumes/rbd/secret/ceph-secret.yaml | 2 +- .../examples/volumes/scaleio/README.md | 16 +- .../examples/volumes/scaleio/sc.yaml | 2 +- .../examples/volumes/storageos/README.md | 6 +- .../examples/volumes/vsphere/README.md | 8 +- .../cmd/federation-apiserver/app/server.go | 14 +- .../federation/pkg/kubefed/init/init_test.go | 6 +- .../federation/pkg/kubefed/util/BUILD | 2 + .../federation/pkg/kubefed/util/util.go | 25 +- .../federation/registry/cluster/etcd/etcd.go | 2 + vendor/k8s.io/kubernetes/pkg/api/types.go | 2 +- .../kubernetes/pkg/api/v1/generated.pb.go | 1443 ++--- vendor/k8s.io/kubernetes/pkg/api/v1/types.go | 2 +- .../pkg/api/v1/zz_generated.conversion.go | 5 +- .../pkg/api/v1/zz_generated.deepcopy.go | 25 +- .../pkg/api/validation/validation.go | 17 +- .../pkg/api/validation/validation_test.go | 29 +- .../pkg/api/zz_generated.deepcopy.go | 25 +- .../v1beta1/zz_generated.deepcopy.go | 18 +- .../authorization/zz_generated.deepcopy.go | 18 +- .../autoscaling/v1/zz_generated.deepcopy.go | 18 +- .../apis/autoscaling/v2alpha1/generated.pb.go | 167 +- .../apis/autoscaling/v2alpha1/generated.proto | 1 - .../apis/autoscaling/zz_generated.deepcopy.go | 18 +- .../apis/batch/v1/zz_generated.deepcopy.go | 18 +- .../pkg/apis/batch/zz_generated.deepcopy.go | 18 +- .../apis/componentconfig/v1alpha1/defaults.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 18 +- .../componentconfig/zz_generated.deepcopy.go | 18 +- .../v1beta1/zz_generated.deepcopy.go | 18 +- .../apis/extensions/zz_generated.deepcopy.go | 18 +- .../pkg/apis/policy/validation/validation.go | 2 +- .../apis/policy/validation/validation_test.go | 108 + .../clientset_generated/.import-restrictions | 1 - .../pkg/cloudprovider/providers/aws/aws.go | 17 +- 
.../pkg/cloudprovider/providers/azure/BUILD | 8 +- .../cloudprovider/providers/azure/azure.go | 189 +- .../providers/azure/azure_backoff.go | 54 +- .../providers/azure/azure_blob.go | 111 - .../azure/azure_blobDiskController.go | 808 +++ .../providers/azure/azure_controllerCommon.go | 270 + .../providers/azure/azure_file.go | 21 +- .../providers/azure/azure_loadbalancer.go | 49 +- .../azure/azure_managedDiskController.go | 129 + .../providers/azure/azure_routes.go | 17 +- .../providers/azure/azure_storage.go | 250 - .../providers/azure/azure_test.go | 25 +- .../providers/azure/azure_util.go | 61 +- .../providers/azure/azure_wrap.go | 9 +- .../pkg/cloudprovider/providers/azure/vhd.go | 38 - .../pkg/cloudprovider/providers/gce/gce.go | 15 +- .../providers/gce/gce_addresses.go | 21 +- .../providers/gce/gce_annotations.go | 27 +- .../providers/gce/gce_healthchecks.go | 4 +- .../providers/gce/gce_healthchecks_test.go | 13 +- .../gce/gce_loadbalancer_external.go | 12 +- .../cloudprovider/providers/gce/gce_test.go | 17 + .../providers/openstack/openstack_volumes.go | 8 +- .../providers/vsphere/vsphere.go | 13 +- .../providers/vsphere/vsphere_test.go | 1 + .../providers/vsphere/vsphere_util.go | 1 + .../certificates/approver/sarapprove.go | 10 + .../certificates/approver/sarapprove_test.go | 6 +- .../controller/cronjob/cronjob_controller.go | 4 +- .../pkg/controller/daemon/daemoncontroller.go | 72 +- .../daemon/daemoncontroller_test.go | 41 +- .../controller/podautoscaler/horizontal.go | 8 +- .../podautoscaler/horizontal_test.go | 19 + .../statefulset/stateful_pod_control_test.go | 28 - .../statefulset/stateful_set_utils.go | 16 +- .../statefulset/stateful_set_utils_test.go | 28 - .../cache/actual_state_of_world.go | 17 - .../cache/actual_state_of_world_test.go | 83 - .../attachdetach/reconciler/reconciler.go | 5 + .../statusupdater/node_status_updater.go | 4 +- .../controller/volume/persistentvolume/BUILD | 1 - .../volume/persistentvolume/pv_controller.go | 8 +- 
.../pkg/credentialprovider/azure/BUILD | 2 +- .../azure/azure_credentials.go | 50 +- .../azure/azure_credentials_test.go | 3 +- .../generated/openapi/zz_generated.openapi.go | 174 +- .../admission/configuration/BUILD | 3 + .../external_admission_hook_manager.go | 7 + .../external_admission_hook_manager_test.go | 40 + .../configuration/initializer_manager_test.go | 16 + .../kubeapiserver/server/insecure_handler.go | 2 +- .../pkg/kubectl/cmd/set/set_selector.go | 6 +- .../cmd/testdata/edit/record_testcase.sh | 2 +- .../testcase-not-update-annotation/0.response | 1 - .../testcase-update-annotation/0.response | 1 - .../kubectl/plugins/examples/aging/aging.rb | 5 +- .../plugins/examples/aging/plugin.yaml | 2 +- .../kubernetes/pkg/kubectl/resource_filter.go | 19 +- vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD | 1 + .../pkg/kubelet/cm/container_manager_linux.go | 65 +- .../cm/container_manager_linux_test.go | 8 + .../cm/container_manager_unsupported_test.go | 8 + .../pkg/kubelet/cm/node_container_manager.go | 4 + .../kubernetes/pkg/kubelet/dockershim/BUILD | 1 + .../pkg/kubelet/dockershim/docker_sandbox.go | 13 +- .../kubernetes/pkg/kubelet/dockershim/exec.go | 24 +- .../pkg/kubelet/dockershim/helpers.go | 60 +- .../pkg/kubelet/dockershim/helpers_test.go | 25 +- .../libdocker/kube_docker_client.go | 10 + .../pkg/kubelet/eviction/eviction_manager.go | 14 +- .../kubelet/eviction/eviction_manager_test.go | 119 +- .../pkg/kubelet/eviction/helpers.go | 35 +- .../pkg/kubelet/eviction/helpers_test.go | 4 +- .../kubernetes/pkg/kubelet/eviction/types.go | 12 +- .../k8s.io/kubernetes/pkg/kubelet/kubelet.go | 2 +- .../kubernetes/pkg/kubelet/kubelet_pods.go | 2 +- .../kubelet/kuberuntime/kuberuntime_logs.go | 8 +- .../k8s.io/kubernetes/pkg/kubelet/pleg/BUILD | 1 - .../kubernetes/pkg/kubelet/pleg/generic.go | 44 - .../pkg/kubelet/pleg/generic_test.go | 55 - .../kubernetes/pkg/kubelet/prober/prober.go | 39 +- .../kubernetes/pkg/kubelet/server/BUILD | 3 + 
.../thirdparty/tprregistration_controller.go | 41 +- .../pkg/printers/internalversion/describe.go | 13 +- .../storage/storage.go | 2 + .../storage/storage.go | 2 + .../controllerrevision/storage/storage.go | 2 + .../apps/statefulset/storage/storage.go | 2 + .../storage/storage.go | 2 + .../registry/batch/cronjob/storage/storage.go | 2 + .../pkg/registry/batch/job/storage/storage.go | 2 + .../pkg/registry/cachesize/cachesize.go | 99 +- .../certificates/storage/storage.go | 2 + .../core/configmap/storage/storage.go | 2 + .../registry/core/endpoint/storage/storage.go | 2 + .../registry/core/event/storage/storage.go | 2 + .../core/limitrange/storage/storage.go | 2 + .../core/namespace/storage/storage.go | 2 + .../pkg/registry/core/node/rest/proxy.go | 2 +- .../pkg/registry/core/node/storage/storage.go | 2 + .../core/persistentvolume/storage/storage.go | 2 + .../persistentvolumeclaim/storage/storage.go | 2 + .../registry/core/pod/rest/subresources.go | 2 +- .../pkg/registry/core/pod/storage/storage.go | 2 + .../core/podtemplate/storage/storage.go | 2 + .../replicationcontroller/storage/storage.go | 2 + .../core/resourcequota/storage/storage.go | 2 + .../registry/core/secret/storage/storage.go | 2 + .../pkg/registry/core/service/proxy.go | 2 +- .../registry/core/service/storage/storage.go | 2 + .../core/serviceaccount/storage/storage.go | 2 + .../extensions/daemonset/storage/storage.go | 2 + .../extensions/deployment/storage/storage.go | 2 + .../extensions/ingress/storage/storage.go | 2 + .../networkpolicy/storage/storage.go | 2 + .../podsecuritypolicy/storage/storage.go | 2 + .../extensions/replicaset/storage/storage.go | 2 + .../thirdpartyresource/storage/storage.go | 2 + .../thirdpartyresourcedata/storage/storage.go | 2 + .../networkpolicy/storage/storage.go | 2 + .../poddisruptionbudget/storage/storage.go | 2 + .../rbac/clusterrole/storage/storage.go | 2 + .../clusterrolebinding/storage/storage.go | 2 + .../pkg/registry/rbac/role/storage/storage.go | 2 + 
.../rbac/rolebinding/storage/storage.go | 2 + .../settings/podpreset/storage/storage.go | 2 + .../storage/storageclass/storage/storage.go | 2 + .../k8s.io/kubernetes/pkg/util/mount/fake.go | 8 + .../k8s.io/kubernetes/pkg/util/mount/mount.go | 46 +- .../kubernetes/pkg/util/mount/mount_linux.go | 13 +- .../pkg/util/mount/mount_unsupported.go | 12 +- .../pkg/util/mount/nsenter_mount.go | 9 + .../util/mount/nsenter_mount_unsupported.go | 8 + .../pkg/util/removeall/removeall_test.go | 6 + vendor/k8s.io/kubernetes/pkg/util/util.go | 9 + vendor/k8s.io/kubernetes/pkg/version/base.go | 2 +- .../kubernetes/pkg/volume/azure_dd/BUILD | 38 +- .../pkg/volume/azure_dd/attacher.go | 203 +- .../pkg/volume/azure_dd/azure_common.go | 342 ++ ...{vhd_util_test.go => azure_common_test.go} | 2 +- .../pkg/volume/azure_dd/azure_dd.go | 359 +- .../pkg/volume/azure_dd/azure_dd_test.go | 126 +- .../pkg/volume/azure_dd/azure_mounter.go | 184 + .../pkg/volume/azure_dd/azure_provision.go | 205 +- .../pkg/volume/azure_dd/vhd_util.go | 145 - .../kubernetes/pkg/volume/fc/disk_manager.go | 44 + vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go | 10 +- .../kubernetes/pkg/volume/fc/fc_util.go | 8 +- .../pkg/volume/glusterfs/glusterfs.go | 3 +- .../kubernetes/pkg/volume/iscsi/attacher.go | 5 - .../pkg/volume/iscsi/disk_manager.go | 33 + .../kubernetes/pkg/volume/iscsi/iscsi.go | 8 +- .../kubernetes/pkg/volume/iscsi/iscsi_util.go | 3 - .../kubernetes/pkg/volume/local/local.go | 12 +- .../pkg/volume/portworx/portworx.go | 8 +- .../pkg/volume/portworx/portworx_util.go | 56 +- .../kubernetes/pkg/volume/scaleio/BUILD | 1 + .../pkg/volume/scaleio/sio_client.go | 9 +- .../kubernetes/pkg/volume/scaleio/sio_mgr.go | 9 +- .../pkg/volume/scaleio/sio_mgr_test.go | 23 +- .../pkg/volume/scaleio/sio_plugin.go | 1 + .../kubernetes/pkg/volume/scaleio/sio_util.go | 20 +- .../pkg/volume/scaleio/sio_util_test.go | 4 +- .../pkg/volume/scaleio/sio_volume.go | 78 +- .../pkg/volume/scaleio/sio_volume_test.go | 114 +- 
.../k8s.io/kubernetes/pkg/volume/util/BUILD | 2 - .../kubernetes/pkg/volume/util/metrics.go | 63 - .../nestedpendingoperations.go | 6 +- .../nestedpendingoperations_test.go | 60 +- .../operationexecutor/operation_executor.go | 41 +- .../operation_executor_test.go | 24 +- .../operationexecutor/operation_generator.go | 81 +- .../k8s.io/kubernetes/pkg/volume/util/util.go | 21 +- .../pkg/admission/noderestriction/BUILD | 2 + .../admission/noderestriction/admission.go | 45 + .../noderestriction/admission_test.go | 211 +- .../authorizer/rbac/bootstrappolicy/policy.go | 3 + .../testdata/cluster-roles.yaml | 6 + .../algorithm/predicates/predicates.go | 22 +- .../algorithm/predicates/predicates_test.go | 26 +- .../plugin/pkg/scheduler/scheduler.go | 43 +- .../plugin/pkg/scheduler/scheduler_test.go | 11 +- .../pkg/scheduler/schedulercache/node_info.go | 5 +- .../pkg/scheduler/testing/fake_cache.go | 6 +- .../kubernetes/staging/prime-apimachinery.sh | 3 +- .../examples/client-go/README.md | 2 +- .../pkg/apiserver/customresource_handler.go | 13 +- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 18 +- .../pkg/runtime/zz_generated.deepcopy.go | 18 +- .../pkg/util/strategicpatch/patch.go | 2 +- .../pkg/util/strategicpatch/patch_test.go | 78 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 24 +- .../apiserver/pkg/admission/initializer/BUILD | 38 + .../admission/initializer/initializer_test.go | 122 + .../plugin/namespace/lifecycle/admission.go | 20 +- .../namespace/lifecycle/admission_test.go | 18 + .../pkg/apis/example/v1/generated.proto | 1 - .../request/x509/testdata/generate.sh | 1 - .../apiserver/pkg/endpoints/installer.go | 41 +- .../pkg/endpoints/openapi/openapi.go | 4 - .../generic/registry/storage_factory.go | 17 +- .../pkg/registry/generic/registry/store.go | 11 +- .../pkg/registry/generic/rest/proxy.go | 14 +- .../pkg/registry/generic/rest/proxy_test.go | 37 +- .../pkg/registry/generic/storage_decorator.go | 3 + .../src/k8s.io/apiserver/pkg/server/config.go | 10 +- 
.../pkg/server/filters/maxinflight.go | 5 + .../apiserver/pkg/server/filters/timeout.go | 12 +- .../apiserver/pkg/server/genericapiserver.go | 2 +- .../pkg/server/options/encryptionconfig/BUILD | 37 + .../encryptionconfig/encryptionconfig_test.go | 246 + .../apiserver/pkg/server/options/etcd.go | 69 +- .../pkg/server/options/server_run_options.go | 15 + .../apiserver/pkg/server/routes/swagger.go | 5 +- .../pkg/storage/etcd/testing/utils.go | 3 +- .../src/k8s.io/client-go/Godeps/Godeps.json | 20 +- .../k8s.io/client-go/discovery/restmapper.go | 16 + .../client-go/discovery/restmapper_test.go | 36 + .../README.md | 4 +- .../src/k8s.io/client-go/pkg/api/types.go | 2 +- .../client-go/pkg/api/v1/generated.pb.go | 1443 ++--- .../src/k8s.io/client-go/pkg/api/v1/types.go | 2 +- .../pkg/api/v1/zz_generated.conversion.go | 5 +- .../pkg/api/v1/zz_generated.deepcopy.go | 7 +- .../pkg/api/zz_generated.deepcopy.go | 7 +- .../apis/autoscaling/v2alpha1/generated.pb.go | 167 +- .../apis/autoscaling/v2alpha1/generated.proto | 1 - .../src/k8s.io/client-go/pkg/util/util.go | 9 + .../src/k8s.io/client-go/pkg/version/base.go | 2 +- .../plugin/pkg/client/auth/azure/BUILD | 3 +- .../plugin/pkg/client/auth/azure/azure.go | 17 +- .../pkg/client/auth/azure/azure_test.go | 6 +- .../cmd/client-gen/test_apis/README | 4 + .../cmd/go-to-protobuf/.gitignore | 1 + .../code-generator/cmd/import-boss/.gitignore | 1 + .../cmd/lister-gen/.import-restrictions | 1 + .../code-generator/cmd/openapi-gen/README | 12 + .../code-generator/cmd/set-gen/.gitignore | 1 + .../artifacts/self-contained/etcd-pod.yaml | 1 - .../pkg/apis/apiregistration/helpers.go | 12 + .../apiregistration/v1beta1/generated.proto | 1 - .../v1beta1/zz_generated.deepcopy.go | 18 +- .../apiregistration/zz_generated.deepcopy.go | 18 +- .../kube-aggregator/pkg/apiserver/BUILD | 1 + .../pkg/apiserver/handler_proxy_test.go | 30 +- .../clientset_generated/clientset/BUILD | 24 + .../pkg/controllers/autoregister/BUILD | 1 + 
.../autoregister/autoregister_controller.go | 118 +- .../autoregister_controller_test.go | 150 +- .../status/available_controller.go | 5 +- .../pkg/registry/apiservice/strategy.go | 5 + .../sample-apiserver/hack/update-codegen.sh | 2 +- .../test/e2e/cluster-logging/sd_events.go | 93 + .../test/e2e/cluster-logging/sd_utils.go | 53 +- vendor/k8s.io/kubernetes/test/e2e/cronjob.go | 3 + vendor/k8s.io/kubernetes/test/e2e/e2e.go | 5 + .../test/e2e/framework/firewall_util.go | 2 +- .../test/e2e/framework/google_compute.go | 100 + .../test/e2e/framework/ingress_utils.go | 22 +- .../kubernetes/test/e2e/framework/pv_util.go | 14 +- .../test/e2e/framework/service_util.go | 28 + .../test/e2e/framework/statefulset_utils.go | 188 +- .../test/e2e/framework/test_context.go | 5 + .../kubernetes/test/e2e/framework/util.go | 52 +- .../kubernetes/test/e2e/generated/bindata.go | 2 +- vendor/k8s.io/kubernetes/test/e2e/ingress.go | 4 +- vendor/k8s.io/kubernetes/test/e2e/kubectl.go | 38 - vendor/k8s.io/kubernetes/test/e2e/service.go | 63 +- .../k8s.io/kubernetes/test/e2e/statefulset.go | 95 +- .../k8s.io/kubernetes/test/e2e/storage/OWNERS | 4 +- .../testing-manifests/ingress/http/ing.yaml | 1 - .../testing-manifests/ingress/http/svc.yaml | 1 - .../ingress/static-ip/ing.yaml | 1 - .../ingress/static-ip/secret.yaml | 1 - .../serviceloadbalancer/nginxsvc.yaml | 1 - .../statefulset/mysql-galera/service.yaml | 1 - .../statefulset/redis/service.yaml | 1 - .../statefulset/zookeeper/service.yaml | 1 - .../kubernetes/test/e2e/upgrades/ingress.go | 2 +- .../test/e2e/upgrades/statefulset.go | 6 +- .../test/e2e_node/remote/node_conformance.go | 27 +- .../test/e2e_node/remote/node_e2e.go | 37 +- .../kubernetes/test/e2e_node/remote/remote.go | 8 +- .../kubernetes/test/e2e_node/remote/types.go | 6 +- .../test/e2e_node/runner/local/run_local.go | 18 +- .../test/e2e_node/runner/remote/run_remote.go | 9 +- .../test/e2e_node/services/kubelet.go | 2 +- .../kubernetes/test/e2e_node/system/types.go | 49 +- 
.../test/e2e_node/system/validators.go | 6 +- .../test/fixtures/doc-yaml/admin/daemon.yaml | 2 +- .../doc-yaml/user-guide/deployment.yaml | 2 +- .../fixtures/doc-yaml/user-guide/ingress.yaml | 1 - .../fixtures/doc-yaml/user-guide/job.yaml | 1 - .../logging-demo/synthetic_0_25lps.yaml | 1 - .../logging-demo/synthetic_10lps.yaml | 1 - .../pkg/kubectl/plugins/env/plugin.yaml | 2 +- .../pkg/kubectl/plugins/tree/plugin.yaml | 1 - .../test/integration/auth/node_test.go | 50 +- .../k8s.io/kubernetes/vendor/k8s.io/kube-gen | 1 - 595 files changed, 36521 insertions(+), 15977 deletions(-) mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go rename vendor/github.com/Azure/azure-sdk-for-go/arm/compute/{usageoperations.go => usage.go} (60%) mode change 100644 => 100755 mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go mode change 100644 => 100755 
vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go mode change 100644 => 100755 
vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go create mode 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go rename vendor/github.com/Azure/azure-sdk-for-go/arm/storage/{usageoperations.go => usage.go} (60%) 
mode change 100644 => 100755 mode change 100644 => 100755 vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/authorization_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/container_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/directory_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/entity_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/message.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/storage/message_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/share.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/share_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch_test.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/storage/version.go rename vendor/github.com/Azure/go-ansiterm/{parser_test_helpers_test.go => parser_test_helpers.go} (100%) rename vendor/github.com/Azure/go-ansiterm/{parser_test_utilities_test.go => parser_test_utilities.go} (100%) rename vendor/github.com/Azure/go-ansiterm/{test_event_handler_test.go => test_event_handler.go} (100%) create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/devicetoken.go (64%) rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/devicetoken_test.go (62%) rename 
vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/persist.go (99%) rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/persist_test.go (99%) create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/token.go (69%) rename vendor/github.com/Azure/go-autorest/autorest/{azure => adal}/token_test.go (54%) create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_test.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/version_test.go create mode 100644 vendor/github.com/satori/uuid/.travis.yml create mode 100644 vendor/github.com/satori/uuid/LICENSE create mode 100644 vendor/github.com/satori/uuid/README.md create mode 100644 vendor/github.com/satori/uuid/benchmarks_test.go create mode 100644 vendor/github.com/satori/uuid/uuid.go create mode 100644 vendor/github.com/satori/uuid/uuid_test.go create mode 100644 vendor/gopkg.in/gcfg.v1/errors.go create mode 100644 vendor/gopkg.in/warnings.v0/LICENSE create mode 100644 vendor/gopkg.in/warnings.v0/README create mode 100644 vendor/gopkg.in/warnings.v0/warnings.go create mode 100644 vendor/gopkg.in/warnings.v0/warnings_test.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blob.go create mode 100644 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go create mode 100644 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go create mode 100644 vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go delete mode 100644 
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/vhd.go create mode 100644 vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager_test.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go rename vendor/k8s.io/kubernetes/pkg/volume/azure_dd/{vhd_util_test.go => azure_common_test.go} (98%) create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/BUILD create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/initializer_test.go create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/encryptionconfig_test.go create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/test_apis/README create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/import-boss/.gitignore create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/lister-gen/.import-restrictions create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/README create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/set-gen/.gitignore create mode 100644 vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/BUILD create mode 100644 vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_events.go delete mode 120000 vendor/k8s.io/kubernetes/vendor/k8s.io/kube-gen diff --git 
a/Godeps/Godeps.json b/Godeps/Godeps.json index 2bccff4c6506..40f2197d275a 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,14 +1,14 @@ { "ImportPath": "github.com/openshift/origin", - "GoVersion": "go1.7", + "GoVersion": "go1.8", "GodepVersion": "v79", "Packages": [ "github.com/elazarl/goproxy", "github.com/golang/mock/gomock", "github.com/containernetworking/cni/plugins/ipam/host-local", "github.com/containernetworking/cni/plugins/main/loopback", - "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo", - "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen", + "k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo", + "k8s.io/code-generator/cmd/client-gen", "k8s.io/kubernetes/pkg/api/testing/compat", "k8s.io/kubernetes/test/e2e/generated", "github.com/onsi/ginkgo/ginkgo", @@ -24,6 +24,7 @@ "./cmd/kubefed/...", "./cmd/oc/...", "./cmd/openshift/...", + "./cmd/template-service-broker/...", "bitbucket.org/bertimus9/systemstat", "github.com/aws/aws-sdk-go/service/route53", "github.com/codegangsta/negroni", @@ -71,61 +72,71 @@ }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk", + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" 
}, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v7.0.1-beta", - "Rev": "0984e0641ae43b89283223034574d6465be93bf4" + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, { "ImportPath": "github.com/Azure/go-ansiterm", - "Rev": "7e0a0b69f76673d5d2f451ee59d9d02cfa006527" + "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" }, { "ImportPath": "github.com/Azure/go-ansiterm/winterm", - "Rev": "7e0a0b69f76673d5d2f451ee59d9d02cfa006527" + "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" }, { "ImportPath": "github.com/Azure/go-autorest/autorest", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + }, + { + "ImportPath": "github.com/Azure/go-autorest/autorest/adal", + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/to", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/validation", - "Comment": "v7.2.3", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Comment": "v8.0.0", + "Rev": 
"58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/MakeNowJust/heredoc", @@ -1006,23 +1017,23 @@ }, { "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-semver/semver", @@ -3332,6 +3343,11 @@ "ImportPath": "github.com/samuel/go-zookeeper/zk", "Rev": "177002e16a0061912f02377e2dd8951a8b3551bc" }, + { + "ImportPath": "github.com/satori/uuid", + "Comment": "v1.1.0-8-g5bf94b6", + "Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b" + }, { "ImportPath": "github.com/seccomp/libseccomp-golang", "Rev": "1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1" @@ -3957,23 +3973,23 @@ }, { "ImportPath": "gopkg.in/gcfg.v1", - "Comment": "v1.0.0", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/scanner", - "Comment": "v1.0.0", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/token", - "Comment": "v1.0.0", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/gcfg.v1/types", - "Comment": 
"v1.0.0", - "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e" + "Comment": "v1.2.0", + "Rev": "27e4946190b4a327b539185f2b5b1f7c84730728" }, { "ImportPath": "gopkg.in/inf.v0", @@ -3990,1649 +4006,1699 @@ "Comment": "v1.0-16-g20b71e5", "Rev": "20b71e5b60d756d3d2f80def009790325acc2b23" }, + { + "ImportPath": "gopkg.in/warnings.v0", + "Comment": "v0.1.1", + "Rev": "8a331561fe74dadba6edfc59f3be66c22c3b065d" + }, { "ImportPath": "gopkg.in/yaml.v2", "Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource", - 
"Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/test/integration/testserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/equality", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/meta", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/resource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/validation/path", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/conversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/conversion/unstructured", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/fields", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/labels", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/schema", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/selection", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/types", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/clock", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/diff", 
- "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/framer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/json", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" 
+ "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/mergepatch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/net", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/remotecommand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/runtime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/sets", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/uuid", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/validation/field", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/wait", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/yaml", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/version", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/watch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/third_party/forked/golang/json", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/third_party/forked/golang/reflect", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/audit", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/audit/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/authenticator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/group", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/union", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/token/cache", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/token/union", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/user", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/authorizer", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/union", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/discovery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/filters", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/request", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/features", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/rest/resttest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/healthz", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/httplog", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/mux", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes/data/swagger", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcdtest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/testingcert", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd3", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/names", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/storagebackend", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/aes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/identity", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/feature", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/flag", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/flushwriter", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/proxy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/trace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/trie", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/wsstream", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/audit/log", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/keystone", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/anytoken", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/dynamic", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/admissionregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/apps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/autoscaling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/autoscaling/v1", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/batch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/core", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/extensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/networking", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/settings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/batch/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/listers/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/ref", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/admissionregistration", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/apps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authentication", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authentication/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authentication/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authorization", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authorization/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authorization/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/extensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/networking", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/settings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util/parsers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/watch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/third_party/forked/golang/template", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/auth", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/api/latest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/api/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/portforward", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/record", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/remotecommand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/transport", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/cert", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/cert/triple", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/exec", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/flowcontrol", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/homedir", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/integer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/jsonpath", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/workqueue", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/args", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators/util", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/path", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/types", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/gengo/args", @@ -5681,4683 +5747,4763 @@ }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme", + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/status", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/etcd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/genutils", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app/preflight", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/fuzzer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/constants", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/args", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/generators/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/path", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/types", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/informer-gen/generators", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/lister-gen/generators", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/generators", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/apis/federation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/federation/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/dnsprovider", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns/stubs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federatedtypes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federatedtypes/crudtester", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/replicaset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util/deletionhelper", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util/eventsink", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/federation/pkg/federation-controller/util/finalizers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util/planner", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util/podanalyzer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/federation-controller/util/replicapreferences", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/kubefed", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/kubefed/init", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/federation/pkg/kubefed/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/helper", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/helper/qos", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/ref", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/testapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/testing/compat", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/endpoints", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/helper", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/helper/qos", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/node", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/ref", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/resource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/latest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v0", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/validation", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer/abac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/nodeidentifier", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/bootstrap/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", 
+ "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/conditions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/admissionregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/autoscaling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/batch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/networking", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/settings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelection", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/admissionregistration/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/apps/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/apps/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/v2alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/certificates/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/core/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/networking/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/networking/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/policy/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/policy/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/listers/rbac/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/rbac/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/rbac/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/settings/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/settings/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", 
+ "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/metrics/prometheus", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/retry", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/mesos", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/rackspace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/bootstrap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/approver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/signer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/cronjob", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/disruption", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/endpoint", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/history", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/job", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace/deletion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/node", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podgc", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replicaset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replication", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/resourcequota", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/route", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/service", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/statefulset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/ttl", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/events", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/azure", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/rancher", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/features", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/generated", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/generated/openapi", - 
"Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authenticator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubeapiserver/server", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/auth", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/config", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/rollout", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/set", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/templates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/metricsutil", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/plugins", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/client", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/configmap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/securitycontext", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/events", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/images", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/cni", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hairpin", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hostport", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/preemption", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/remote", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/rkt", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/secret", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/streaming", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/sysctl", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/csr", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/master", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/master/thirdparty", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/tunneler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.7.0-144-g80709908fd", - 
"Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/healthcheck", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winuserspace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/evaluator/core", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/generic", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/tokenreview", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/cachesize", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/componentstatus", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/rest", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/core/pod/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rangeallocation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/daemonset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/networkpolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rest", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/policybased", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/registrytest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/routes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/ssh", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/async", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/crlf", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ebtables", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/exec", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/i18n", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/interrupt", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipconfig", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/limitwriter", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/logs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/maps", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/netsh", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/reflector/prometheus", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/removeall", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resourcecontainer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rlimit", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/system", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tail", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/taints", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/term", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tolerations", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/version", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/workqueue/prometheus", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/prometheus", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/verflag", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_dd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/fc", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/volume/flocker", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/host_path", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/iscsi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/local", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/nfs", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/photon_pd", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/portworx", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/projected", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/quobyte", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/rbd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/scaleio", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/secret", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/storageos", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/types", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumehelper", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/pkg/watch/json", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/admit", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/deny", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/exec", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/gc", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialization", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialresources", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/limitranger", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podpreset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/core", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/factory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/plugin/pkg/scheduler/util", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/equality", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/meta", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/resource", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/api/validation/path", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery/announced", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apimachinery/registered", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": 
"v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/conversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/conversion/queryparams", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/fields", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/labels", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/schema", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/selection", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/types", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/clock", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/diff", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/errors", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/intstr", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/net", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/rand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/remotecommand", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/runtime", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/sets", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/uuid", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/validation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/validation/field", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/wait", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/yaml", - 
"Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/version", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/watch", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/audit/validation", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/audit", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/audit/policy", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/authenticator", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/authenticatorfactory", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/group", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/anonymous", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/bearertoken", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/headerrequest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/union", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/websocket", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/token/cache", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/token/union", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/user", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/authorizer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/authorizerfactory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authorization/union", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/discovery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/filters", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/handlers", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/request", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/features", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": 
"k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/healthz", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" - }, - { - "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/httplog", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/mux", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + 
"Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/etcdtest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/util", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/names", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/storagebackend", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/feature", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/flag", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/util/wsstream", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/audit/log", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { 
+ "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/audit/webhook", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/cached", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/dynamic", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/informers", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/kubernetes/typed/core/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/authentication/v1", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/rest", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/rest/fake", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/clientcmd/api", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/portforward", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/record", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/remotecommand", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/transport", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/cert", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/flowcontrol", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/homedir", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/integer", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/jsonpath", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { 
"ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/testing", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/client-go/util/workqueue", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/generators", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/install", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + 
"Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" + }, + { + "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset", + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/chaosmonkey", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/common", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/framework", - "Comment": 
"v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/generated", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/perf", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/perftype", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/scheduling", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e/upgrades", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e_federation", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e_federation/framework", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/e2e_federation/upgrades", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/images/net/common", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/images/net/nat", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/utils", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/test/utils/junit", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/expansion", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": 
"b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/apis/custom_metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/apis/metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": 
"a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "k8s.io/kubernetes/staging/src/k8s.io/metrics/pkg/client/custom_metrics", - "Comment": "v1.7.0-144-g80709908fd", - "Rev": "b0608fa189530bca78d7459a87318652b116171e" + "Comment": "v1.7.6-166-ga08f5eeb62", + "Rev": "a08f5eeb6246134f4ae5443c0593d72fd057ea7c" }, { "ImportPath": "github.com/kubernetes-incubator/cri-o/client", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go old mode 100644 new mode 100755 index 34a4d2df8269..738c2c61ea0f --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -14,14 +14,13 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) @@ -44,26 +43,21 @@ func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) // CreateOrUpdate create or update an availability set. // -// resourceGroupName is the name of the resource group. name is the name of -// the availability set. parameters is parameters supplied to the Create +// resourceGroupName is the name of the resource group. name is the name of the +// availability set. parameters is parameters supplied to the Create // Availability Set operation. func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AvailabilitySetProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AvailabilitySetProperties.Statuses", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate") - } - req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", 
"CreateOrUpdate", resp, "Failure sending request") + return } result, err = client.CreateOrUpdateResponder(resp) @@ -82,8 +76,9 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -119,16 +114,18 @@ func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response // // resourceGroupName is the name of the resource group. availabilitySetName is // the name of the availability set. -func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) { +func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result OperationStatusResponse, err error) { req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request") + return } resp, err := client.DeleteSender(req) if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request") + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request") + return } result, err = client.DeleteResponder(resp) @@ -147,8 +144,9 @@ func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, av "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := 
map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -167,13 +165,14 @@ func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Resp // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -184,13 +183,15 @@ func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (resul func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) { req, err := client.GetPreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -209,8 +210,9 @@ func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, avail "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const 
APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -246,13 +248,15 @@ func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result A func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -270,8 +274,9 @@ func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*ht "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -309,13 +314,15 @@ func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) { req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request") + err = 
autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request") + return } resp, err := client.ListAvailableSizesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request") + return } result, err = client.ListAvailableSizesResponder(resp) @@ -334,8 +341,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go old mode 100644 new mode 100755 index e8f3fb3e6d3d..c60452b9d794 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -1,5 +1,5 @@ // Package compute implements the Azure ARM Compute service API version -// 2016-03-30. +// 2016-04-30-preview. // // The Compute Management Client. package compute @@ -18,7 +18,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -27,9 +27,6 @@ import ( ) const ( - // APIVersion is the version of the Compute - APIVersion = "2016-03-30" - // DefaultBaseURI is the default URI used for the service Compute DefaultBaseURI = "https://management.azure.com" ) @@ -38,7 +35,6 @@ const ( type ManagementClient struct { autorest.Client BaseURI string - APIVersion string SubscriptionID string } @@ -52,7 +48,6 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, - APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go new file mode 100755 index 000000000000..64f14dd082ff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go @@ -0,0 +1,463 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ImagesClient is the the Compute Management Client. 
+type ImagesClient struct { + ManagementClient +} + +// NewImagesClient creates an instance of the ImagesClient client. +func NewImagesClient(subscriptionID string) ImagesClient { + return NewImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewImagesClientWithBaseURI creates an instance of the ImagesClient client. +func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesClient { + return ImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update an image. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. imageName is the name +// of the image. parameters is parameters supplied to the Create Image +// operation. +func (client ImagesClient) CreateOrUpdate(resourceGroupName string, imageName string, parameters Image, cancel <-chan struct{}) (<-chan Image, <-chan error) { + resultChan := make(chan Image, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ImageProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.ImagesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Image + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, 
imageName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ImagesClient) CreateOrUpdatePreparer(resourceGroupName string, imageName string, parameters Image, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result Image, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an Image. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. imageName is the name +// of the image. 
+func (client ImagesClient) Delete(resourceGroupName string, imageName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, imageName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client ImagesClient) DeletePreparer(resourceGroupName string, imageName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ImagesClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets an image. +// +// resourceGroupName is the name of the resource group. imageName is the name +// of the image. expand is the expand expression to apply on the operation. 
+func (client ImagesClient) Get(resourceGroupName string, imageName string, expand string) (result Image, err error) { + req, err := client.GetPreparer(resourceGroupName, imageName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ImagesClient) GetPreparer(resourceGroupName string, imageName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ImagesClient) GetResponder(resp *http.Response) (result Image, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the list of Images in the subscription. Use nextLink property in +// the response to get the next page of Images. Do this till nextLink is not +// null to fetch all the Images. +func (client ImagesClient) List() (result ImageListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ImagesClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ImagesClient) ListResponder(resp *http.Response) (result ImageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ImagesClient) ListNextResults(lastResults ImageListResult) (result ImageListResult, err error) { + req, err := lastResults.ImageListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup gets the list of images under a resource group. +// +// resourceGroupName is the name of the resource group. +func (client ImagesClient) ListByResourceGroup(resourceGroupName string) (result ImageListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client ImagesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ImagesClient) ListByResourceGroupResponder(resp *http.Response) (result ImageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ImagesClient) ListByResourceGroupNextResults(lastResults ImageListResult) (result ImageListResult, err error) { + req, err := lastResults.ImageListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go old mode 100644 new mode 100755 index 13dbe637c4a0..a9524daeffac --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -66,6 +66,19 @@ const ( InstanceView InstanceViewTypes = "instanceView" ) +// OperatingSystemStateTypes enumerates the values for operating system state +// types. +type OperatingSystemStateTypes string + +const ( + // Generalized specifies the generalized state for operating system state + // types. 
+ Generalized OperatingSystemStateTypes = "Generalized" + // Specialized specifies the specialized state for operating system state + // types. + Specialized OperatingSystemStateTypes = "Specialized" +) + // OperatingSystemTypes enumerates the values for operating system types. type OperatingSystemTypes string @@ -94,6 +107,15 @@ const ( HTTPS ProtocolTypes = "Https" ) +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // SystemAssigned specifies the system assigned state for resource identity + // type. + SystemAssigned ResourceIdentityType = "SystemAssigned" +) + // SettingNames enumerates the values for setting names. type SettingNames string @@ -117,6 +139,16 @@ const ( Warning StatusLevelTypes = "Warning" ) +// StorageAccountTypes enumerates the values for storage account types. +type StorageAccountTypes string + +const ( + // PremiumLRS specifies the premium lrs state for storage account types. + PremiumLRS StorageAccountTypes = "Premium_LRS" + // StandardLRS specifies the standard lrs state for storage account types. + StandardLRS StorageAccountTypes = "Standard_LRS" +) + // UpgradeMode enumerates the values for upgrade mode. type UpgradeMode string @@ -127,8 +159,8 @@ const ( Manual UpgradeMode = "Manual" ) -// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual -// machine scale set sku scale type. +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine +// scale set sku scale type. type VirtualMachineScaleSetSkuScaleType string const ( @@ -335,8 +367,8 @@ const ( // AdditionalUnattendContent is additional XML formatted information that can // be included in the Unattend.xml file, which is used by Windows Setup. -// Contents are defined by setting name, component name, and the pass in -// which the content is a applied. +// Contents are defined by setting name, component name, and the pass in which +// the content is a applied. 
type AdditionalUnattendContent struct { PassName PassNames `json:"passName,omitempty"` ComponentName ComponentNames `json:"componentName,omitempty"` @@ -374,6 +406,7 @@ type AvailabilitySet struct { Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` *AvailabilitySetProperties `json:"properties,omitempty"` + Sku *Sku `json:"sku,omitempty"` } // AvailabilitySetListResult is the List Availability Set operation response. @@ -388,6 +421,7 @@ type AvailabilitySetProperties struct { PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + Managed *bool `json:"managed,omitempty"` } // BootDiagnostics is describes Boot Diagnostics. @@ -405,13 +439,14 @@ type BootDiagnosticsInstanceView struct { // DataDisk is describes a data disk. type DataDisk struct { - Lun *int32 `json:"lun,omitempty"` - Name *string `json:"name,omitempty"` - Vhd *VirtualHardDisk `json:"vhd,omitempty"` - Image *VirtualHardDisk `json:"image,omitempty"` - Caching CachingTypes `json:"caching,omitempty"` - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + Lun *int32 `json:"lun,omitempty"` + Name *string `json:"name,omitempty"` + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` } // DataDiskImage is contains the data disk images information. @@ -442,14 +477,79 @@ type HardwareProfile struct { VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` } +// Image is describes an Image. 
+type Image struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ImageProperties `json:"properties,omitempty"` +} + +// ImageDataDisk is describes a data disk. +type ImageDataDisk struct { + Lun *int32 `json:"lun,omitempty"` + Snapshot *SubResource `json:"snapshot,omitempty"` + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + BlobURI *string `json:"blobUri,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` +} + +// ImageListResult is the List Image operation response. +type ImageListResult struct { + autorest.Response `json:"-"` + Value *[]Image `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ImageListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ImageListResult) ImageListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ImageOSDisk is describes an Operating System disk. +type ImageOSDisk struct { + OsType OperatingSystemTypes `json:"osType,omitempty"` + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + Snapshot *SubResource `json:"snapshot,omitempty"` + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + BlobURI *string `json:"blobUri,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` +} + +// ImageProperties is describes the properties of an Image. 
+type ImageProperties struct { + SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"` + StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // ImageReference is the image reference. type ImageReference struct { + ID *string `json:"id,omitempty"` Publisher *string `json:"publisher,omitempty"` Offer *string `json:"offer,omitempty"` Sku *string `json:"sku,omitempty"` Version *string `json:"version,omitempty"` } +// ImageStorageProfile is describes a storage profile. +type ImageStorageProfile struct { + OsDisk *ImageOSDisk `json:"osDisk,omitempty"` + DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` +} + // InnerError is inner error details. type InnerError struct { Exceptiontype *string `json:"exceptiontype,omitempty"` @@ -520,6 +620,12 @@ type LongRunningOperationProperties struct { Output *map[string]interface{} `json:"output,omitempty"` } +// ManagedDiskParameters is the parameters of a managed disk. +type ManagedDiskParameters struct { + ID *string `json:"id,omitempty"` + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + // NetworkInterfaceReference is describes a network interface reference. type NetworkInterfaceReference struct { ID *string `json:"id,omitempty"` @@ -537,6 +643,16 @@ type NetworkProfile struct { NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` } +// OperationStatusResponse is operation status response +type OperationStatusResponse struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Error *APIError `json:"error,omitempty"` +} + // OSDisk is describes an Operating System disk. 
type OSDisk struct { OsType OperatingSystemTypes `json:"osType,omitempty"` @@ -547,6 +663,7 @@ type OSDisk struct { Caching CachingTypes `json:"caching,omitempty"` CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` } // OSDiskImage is contains the os disk image information. @@ -581,7 +698,7 @@ type PurchasePlan struct { Product *string `json:"product,omitempty"` } -// Resource is the resource model definition. +// Resource is the Resource model definition. type Resource struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` @@ -621,6 +738,11 @@ type SubResource struct { ID *string `json:"id,omitempty"` } +// SubResourceReadOnly is +type SubResourceReadOnly struct { + ID *string `json:"id,omitempty"` +} + // UpgradePolicy is describes an upgrade policy - automatic or manual. type UpgradePolicy struct { Mode UpgradeMode `json:"mode,omitempty"` @@ -640,8 +762,8 @@ type UsageName struct { LocalizedValue *string `json:"localizedValue,omitempty"` } -// VaultCertificate is describes a single certificate reference in a Key -// Vault, and where the certificate should reside on the VM. +// VaultCertificate is describes a single certificate reference in a Key Vault, +// and where the certificate should reside on the VM. type VaultCertificate struct { CertificateURL *string `json:"certificateUrl,omitempty"` CertificateStore *string `json:"certificateStore,omitempty"` @@ -670,10 +792,11 @@ type VirtualMachine struct { Plan *Plan `json:"plan,omitempty"` *VirtualMachineProperties `json:"properties,omitempty"` Resources *[]VirtualMachineExtension `json:"resources,omitempty"` + Identity *VirtualMachineIdentity `json:"identity,omitempty"` } -// VirtualMachineAgentInstanceView is the instance view of the VM Agent -// running on the virtual machine. 
+// VirtualMachineAgentInstanceView is the instance view of the VM Agent running +// on the virtual machine. type VirtualMachineAgentInstanceView struct { VMAgentVersion *string `json:"vmAgentVersion,omitempty"` ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` @@ -711,8 +834,8 @@ type VirtualMachineExtension struct { *VirtualMachineExtensionProperties `json:"properties,omitempty"` } -// VirtualMachineExtensionHandlerInstanceView is the instance view of a -// virtual machine extension handler. +// VirtualMachineExtensionHandlerInstanceView is the instance view of a virtual +// machine extension handler. type VirtualMachineExtensionHandlerInstanceView struct { Type *string `json:"type,omitempty"` TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` @@ -764,6 +887,13 @@ type VirtualMachineExtensionProperties struct { InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` } +// VirtualMachineIdentity is identity for the virtual machine. +type VirtualMachineIdentity struct { + PrincipalID *string `json:"principalId,omitempty"` + TenantID *string `json:"tenantId,omitempty"` + Type ResourceIdentityType `json:"type,omitempty"` +} + // VirtualMachineImage is describes a Virtual Machine Image. type VirtualMachineImage struct { autorest.Response `json:"-"` @@ -844,7 +974,20 @@ type VirtualMachineScaleSet struct { Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` Sku *Sku `json:"sku,omitempty"` + Plan *Plan `json:"plan,omitempty"` *VirtualMachineScaleSetProperties `json:"properties,omitempty"` + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` +} + +// VirtualMachineScaleSetDataDisk is describes a virtual machine scale set data +// disk. 
+type VirtualMachineScaleSetDataDisk struct { + Name *string `json:"name,omitempty"` + Lun *int32 `json:"lun,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` } // VirtualMachineScaleSetExtension is describes a Virtual Machine Scale Set @@ -873,8 +1016,16 @@ type VirtualMachineScaleSetExtensionProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// VirtualMachineScaleSetInstanceView is the instance view of a virtual -// machine scale set. +// VirtualMachineScaleSetIdentity is identity for the virtual machine scale +// set. +type VirtualMachineScaleSetIdentity struct { + PrincipalID *string `json:"principalId,omitempty"` + TenantID *string `json:"tenantId,omitempty"` + Type ResourceIdentityType `json:"type,omitempty"` +} + +// VirtualMachineScaleSetInstanceView is the instance view of a virtual machine +// scale set. type VirtualMachineScaleSetInstanceView struct { autorest.Response `json:"-"` VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` @@ -965,6 +1116,12 @@ func (client VirtualMachineScaleSetListWithLinkResult) VirtualMachineScaleSetLis autorest.WithBaseURL(to.String(client.NextLink))) } +// VirtualMachineScaleSetManagedDiskParameters is describes the parameters of a +// ScaleSet managed disk. +type VirtualMachineScaleSetManagedDiskParameters struct { + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + // VirtualMachineScaleSetNetworkConfiguration is describes a virtual machine // scale set network profile's network configurations. 
type VirtualMachineScaleSetNetworkConfiguration struct { @@ -989,12 +1146,13 @@ type VirtualMachineScaleSetNetworkProfile struct { // VirtualMachineScaleSetOSDisk is describes a virtual machine scale set // operating system disk. type VirtualMachineScaleSetOSDisk struct { - Name *string `json:"name,omitempty"` - Caching CachingTypes `json:"caching,omitempty"` - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - OsType OperatingSystemTypes `json:"osType,omitempty"` - Image *VirtualHardDisk `json:"image,omitempty"` - VhdContainers *[]string `json:"vhdContainers,omitempty"` + Name *string `json:"name,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + VhdContainers *[]string `json:"vhdContainers,omitempty"` + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` } // VirtualMachineScaleSetOSProfile is describes a virtual machine scale set OS @@ -1016,6 +1174,7 @@ type VirtualMachineScaleSetProperties struct { VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` Overprovision *bool `json:"overprovision,omitempty"` + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` } // VirtualMachineScaleSetSku is describes an available virtual machine scale @@ -1037,8 +1196,9 @@ type VirtualMachineScaleSetSkuCapacity struct { // VirtualMachineScaleSetStorageProfile is describes a virtual machine scale // set storage profile. 
type VirtualMachineScaleSetStorageProfile struct { - ImageReference *ImageReference `json:"imageReference,omitempty"` - OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` } // VirtualMachineScaleSetVM is describes a virtual machine scale set virtual @@ -1088,10 +1248,11 @@ type VirtualMachineScaleSetVMInstanceView struct { Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + PlacementGroupID *string `json:"placementGroupId,omitempty"` } -// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set -// VMs operation response. +// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set VMs +// operation response. type VirtualMachineScaleSetVMListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` @@ -1123,6 +1284,7 @@ type VirtualMachineScaleSetVMProfile struct { // machine scale set virtual machine. 
type VirtualMachineScaleSetVMProperties struct { LatestModelApplied *bool `json:"latestModelApplied,omitempty"` + VMID *string `json:"vmId,omitempty"` InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` StorageProfile *StorageProfile `json:"storageProfile,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go old mode 100644 new mode 100755 similarity index 60% rename from vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go rename to vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go index 5fb5bd6f555e..97c53e0efd7f --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,21 +25,19 @@ import ( "net/http" ) -// UsageOperationsClient is the the Compute Management Client. -type UsageOperationsClient struct { +// UsageClient is the the Compute Management Client. +type UsageClient struct { ManagementClient } -// NewUsageOperationsClient creates an instance of the UsageOperationsClient -// client. -func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { - return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +// NewUsageClient creates an instance of the UsageClient client. 
+func NewUsageClient(subscriptionID string) UsageClient { + return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewUsageOperationsClientWithBaseURI creates an instance of the -// UsageOperationsClient client. -func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { - return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +// NewUsageClientWithBaseURI creates an instance of the UsageClient client. +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} } // List gets, for the specified location, the current compute resource usage @@ -47,41 +45,44 @@ func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) // subscription. // // location is the location for which resource usage is queried. -func (client UsageOperationsClient) List(location string) (result ListUsagesResult, err error) { +func (client UsageClient) List(location string) (result ListUsagesResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: location, Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.UsageOperationsClient", "List") + return result, validation.NewErrorWithValidationError(err, "compute.UsageClient", "List") } req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure 
sending request") + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. -func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) { +func (client UsageClient) ListPreparer(location string) (*http.Request, error) { pathParameters := map[string]interface{}{ "location": autorest.Encode("path", location), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -94,13 +95,13 @@ func (client UsageOperationsClient) ListPreparer(location string) (*http.Request // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. -func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { +func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. -func (client UsageOperationsClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { +func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -112,10 +113,10 @@ func (client UsageOperationsClient) ListResponder(resp *http.Response) (result L } // ListNextResults retrieves the next set of results, if any. 
-func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { +func (client UsageClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { req, err := lastResults.ListUsagesResultPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing next results request") + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing next results request") } if req == nil { return @@ -124,12 +125,12 @@ func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending next results request") + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending next results request") } result, err = client.ListResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to next results request") + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to next results request") } return diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go old mode 100644 new mode 100755 index 3c4783ed6fa4..a5318ebf7983 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -14,30 +14,16 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. -import ( - "fmt" -) - -const ( - major = "7" - minor = "0" - patch = "1" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" -) - // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "compute", "2016-03-30") + return "Azure-SDK-For-Go/v10.0.2-beta arm-compute/2016-04-30-preview" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + return "v10.0.2-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go old mode 100644 new mode 100755 index 089ebe10e820..fcd122704284 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -35,8 +35,8 @@ func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachin return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of -// the VirtualMachineExtensionImagesClient client. +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the +// VirtualMachineExtensionImagesClient client. func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -46,13 +46,15 @@ func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscript func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) { req, err := client.GetPreparer(location, publisherName, typeParameter, version) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -73,8 +75,9 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, p "version": autorest.Encode("path", version), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": 
client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -109,13 +112,15 @@ func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Respon func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) { req, err := client.ListTypesPreparer(location, publisherName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request") + return } resp, err := client.ListTypesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request") + return } result, err = client.ListTypesResponder(resp) @@ -134,8 +139,9 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -171,13 +177,15 @@ func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http. 
func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) { req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request") + return } resp, err := client.ListVersionsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request") + return } result, err = client.ListVersionsResponder(resp) @@ -197,8 +205,9 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location "type": autorest.Encode("path", typeParameter), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go old mode 100644 new mode 100755 index d94a2b9683a4..7a876cfef1a2 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -14,14 +14,13 @@ package compute // See the License for the specific language governing permissions and // 
limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) @@ -47,49 +46,56 @@ func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID // channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine where the extension should be create or updated. -// vmExtensionName is the name of the virtual machine extension. +// VMExtensionName is the name of the virtual machine extension. // extensionParameters is parameters supplied to the Create Virtual Machine // Extension operation. 
-func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: extensionParameters, - Constraints: []validation.Constraint{{Target: "extensionParameters.VirtualMachineExtensionProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "extensionParameters.VirtualMachineExtensionProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (<-chan VirtualMachineExtension, <-chan error) { + resultChan := make(chan VirtualMachineExtension, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualMachineExtension + defer func() { + resultChan <- result + errChan <- err + 
close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, VMName, VMExtensionName, extensionParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmExtensionName": autorest.Encode("path", vmExtensionName), - "vmName": autorest.Encode("path", vmName), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -112,55 +118,70 @@ func (client 
VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Requ // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete the operation to delete the extension. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. vmName is the name of -// the virtual machine where the extension should be deleted. vmExtensionName +// resourceGroupName is the name of the resource group. VMName is the name of +// the virtual machine where the extension should be deleted. VMExtensionName // is the name of the virtual machine extension. 
-func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, VMName string, VMExtensionName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, VMName, VMExtensionName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure responding to request") 
+ } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. -func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, VMName string, VMExtensionName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmExtensionName": autorest.Encode("path", vmExtensionName), - "vmName": autorest.Encode("path", vmName), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -181,32 +202,35 @@ func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*h // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Get the operation to get the extension. // -// resourceGroupName is the name of the resource group. vmName is the name of -// the virtual machine containing the extension. 
vmExtensionName is the name -// of the virtual machine extension. expand is the expand expression to apply -// on the operation. -func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, err error) { - req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand) +// resourceGroupName is the name of the resource group. VMName is the name of +// the virtual machine containing the extension. VMExtensionName is the name of +// the virtual machine extension. expand is the expand expression to apply on +// the operation. +func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, VMName string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) { + req, err := client.GetPreparer(resourceGroupName, VMName, VMExtensionName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -218,16 +242,17 @@ func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmNam } // GetPreparer prepares the Get request. 
-func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) { +func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, VMName string, VMExtensionName string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmExtensionName": autorest.Encode("path", vmExtensionName), - "vmName": autorest.Encode("path", vmName), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go old mode 100644 new mode 100755 index db1877789378..6c090568f520 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -49,13 +49,15 @@ func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID str func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) { req, err := client.GetPreparer(location, publisherName, offer, skus, version) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -77,8 +79,9 @@ func (client VirtualMachineImagesClient) GetPreparer(location string, publisherN "version": autorest.Encode("path", version), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -117,13 +120,15 @@ func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (resu func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, 
"Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -144,8 +149,9 @@ func (client VirtualMachineImagesClient) ListPreparer(location string, publisher "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) @@ -192,13 +198,15 @@ func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (res func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListOffersPreparer(location, publisherName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request") + return } resp, err := client.ListOffersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request") + return } result, err = client.ListOffersResponder(resp) @@ -217,8 +225,9 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(location string, pub "subscriptionId": 
autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -255,13 +264,15 @@ func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response func (client VirtualMachineImagesClient) ListPublishers(location string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListPublishersPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request") + return } resp, err := client.ListPublishersSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request") + return } result, err = client.ListPublishersResponder(resp) @@ -279,8 +290,9 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -318,13 +330,15 @@ func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Resp func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) { req, err := client.ListSkusPreparer(location, publisherName, offer) if err != nil { - return result, 
autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request") + return } resp, err := client.ListSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request") + return } result, err = client.ListSkusResponder(resp) @@ -344,8 +358,9 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publi "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go old mode 100644 new mode 100755 index 626319737ce1..686b7ace2327 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -43,52 +43,69 @@ func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) } // Capture captures the VM by copying virtual hard disks of the VM and outputs -// a template that can be used to create similar VMs. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// a template that can be used to create similar VMs. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. parameters is parameters supplied to the Capture // Virtual Machine operation. -func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (result autorest.Response, err error) { +func (client VirtualMachinesClient) Capture(resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (<-chan VirtualMachineCaptureResult, <-chan error) { + resultChan := make(chan VirtualMachineCaptureResult, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.VhdPrefix", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.DestinationContainerName", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.OverwriteVhds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "Capture") - } - - req, err := client.CapturePreparer(resourceGroupName, 
vmName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request") - } - - resp, err := client.CaptureSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure sending request") - } - - result, err = client.CaptureResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "Capture") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result VirtualMachineCaptureResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CapturePreparer(resourceGroupName, VMName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request") + return + } + + resp, err := client.CaptureSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure sending request") + return + } + + result, err = client.CaptureResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CapturePreparer prepares the Capture request. 
-func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -111,25 +128,111 @@ func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Resp // CaptureResponder handles the response to the Capture request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ConvertToManagedDisks converts virtual machine disks from blob-based to +// managed disks. Virtual machine must be stop-deallocated before invoking this +// operation. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
VMName is the name of +// the virtual machine. +func (client VirtualMachinesClient) ConvertToManagedDisks(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ConvertToManagedDisksPreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", nil, "Failure preparing request") + return + } + + resp, err := client.ConvertToManagedDisksSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", resp, "Failure sending request") + return + } + + result, err = client.ConvertToManagedDisksResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// ConvertToManagedDisksPreparer prepares the ConvertToManagedDisks request. 
+func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ConvertToManagedDisksResponder handles the response to the ConvertToManagedDisks request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // CreateOrUpdate the operation to create or update a virtual machine. This // method may poll for completion. 
Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and -// any outstanding HTTP requests. +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmName is the name of -// the virtual machine. parameters is parameters supplied to the Create -// Virtual Machine operation. -func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) { +// resourceGroupName is the name of the resource group. VMName is the name of +// the virtual machine. parameters is parameters supplied to the Create Virtual +// Machine operation. +func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, VMName string, parameters VirtualMachine, cancel <-chan struct{}) (<-chan VirtualMachine, <-chan error) { + resultChan := make(chan VirtualMachine, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineProperties", Name: validation.Null, Rule: false, @@ -145,47 +248,56 @@ func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmN {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, }}, }}, - {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.Vhd", Name: validation.Null, Rule: true, Chain: nil}, }}, }}, - {Target: "parameters.VirtualMachineProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.VirtualMachineProperties.InstanceView", 
Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.VirtualMachineProperties.VMID", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - {Target: "parameters.Resources", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result VirtualMachine + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, VMName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = 
client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, VMName string, parameters VirtualMachine, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -208,13 +320,14 @@ func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*ht // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -224,38 +337,52 @@ func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) // by passing the cancel channel argument. 
The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") - } - - resp, err := client.DeallocateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure sending request") - } - - result, err = client.DeallocateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) Deallocate(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeallocatePreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") + return + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure sending request") + return + } + + result, err = 
client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeallocatePreparer prepares the Deallocate request. -func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -276,53 +403,68 @@ func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.R // DeallocateResponder handles the response to the Deallocate request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete the operation to delete a virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. 
The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) Delete(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = 
autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. -func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -343,30 +485,33 @@ func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Respo // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Generalize sets the state of the virtual machine to generalized. 
// -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, err error) { - req, err := client.GeneralizePreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) Generalize(resourceGroupName string, VMName string) (result OperationStatusResponse, err error) { + req, err := client.GeneralizePreparer(resourceGroupName, VMName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request") + return } resp, err := client.GeneralizeSender(req) if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request") + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request") + return } result, err = client.GeneralizeResponder(resp) @@ -378,15 +523,16 @@ func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName } // GeneralizePreparer prepares the Generalize request. 
-func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, VMName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -405,32 +551,35 @@ func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.R // GeneralizeResponder handles the response to the Generalize request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Get retrieves information about the model view or the instance view of a // virtual machine. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. expand is the expand expression to apply on the -// operation. 
Possible values include: 'instanceView' -func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand InstanceViewTypes) (result VirtualMachine, err error) { - req, err := client.GetPreparer(resourceGroupName, vmName, expand) +// operation. +func (client VirtualMachinesClient) Get(resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) { + req, err := client.GetPreparer(resourceGroupName, VMName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -442,15 +591,16 @@ func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, } // GetPreparer prepares the Get request. 
-func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand InstanceViewTypes) (*http.Request, error) { +func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, VMName string, expand InstanceViewTypes) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(string(expand)) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -491,13 +641,15 @@ func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result Vi func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -515,8 +667,9 @@ func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": 
client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -570,19 +723,21 @@ func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineLi return } -// ListAll lists all of the virtual machines in the specified subscription. -// Use the nextLink property in the response to get the next page of virtual +// ListAll lists all of the virtual machines in the specified subscription. Use +// the nextLink property in the response to get the next page of virtual // machines. func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -599,8 +754,9 @@ func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -657,18 +813,20 @@ func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachin // ListAvailableSizes lists all available virtual machine sizes to which the // specified virtual machine can be resized. // -// resourceGroupName is the name of the resource group. 
vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, err error) { - req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName) +func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, VMName string) (result VirtualMachineSizeListResult, err error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, VMName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request") + return } resp, err := client.ListAvailableSizesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request") + return } result, err = client.ListAvailableSizesResponder(resp) @@ -680,15 +838,16 @@ func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, } // ListAvailableSizesPreparer prepares the ListAvailableSizes request. 
-func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) { +func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, VMName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -719,44 +878,57 @@ func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Respo } // PowerOff the operation to power off (stop) a virtual machine. The virtual -// machine can be restarted with the same provisioned resources. You are -// still charged for this virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// machine can be restarted with the same provisioned resources. You are still +// charged for this virtual machine. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. 
-func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") - } - - resp, err := client.PowerOffSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure sending request") - } - - result, err = client.PowerOffResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) PowerOff(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.PowerOffPreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") + return + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure sending request") + return + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -777,13 +949,14 @@ func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Res // PowerOffResponder handles the response to the PowerOff request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -792,38 +965,52 @@ func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (resu // argument. The channel will be used to cancel polling and any outstanding // HTTP requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. 
-func (client VirtualMachinesClient) Redeploy(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.RedeployPreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request") - } - - resp, err := client.RedeploySender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure sending request") - } - - result, err = client.RedeployResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) Redeploy(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RedeployPreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request") + return + } + + resp, err := client.RedeploySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure sending request") + return + } + + result, err = client.RedeployResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // RedeployPreparer prepares the Redeploy request. 
-func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -844,53 +1031,68 @@ func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Res // RedeployResponder handles the response to the Redeploy request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Restart the operation to restart a virtual machine. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Restart the operation to restart a virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. 
// -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.RestartPreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request") - } - - resp, err := client.RestartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure sending request") - } - - result, err = client.RestartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) Restart(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RestartPreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request") + return + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure sending request") + return + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", 
"Restart", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // RestartPreparer prepares the Restart request. -func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -911,53 +1113,68 @@ func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Resp // RestartResponder handles the response to the Restart request. The method always // closes the http.Response Body. -func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Start the operation to start a virtual machine. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. 
+// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. vmName is the name of +// resourceGroupName is the name of the resource group. VMName is the name of // the virtual machine. -func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.StartPreparer(resourceGroupName, vmName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request") - } - - resp, err := client.StartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure sending request") - } - - result, err = client.StartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure responding to request") - } - - return +func (client VirtualMachinesClient) Start(resourceGroupName string, VMName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StartPreparer(resourceGroupName, VMName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request") + return + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure sending request") + return + } + + result, err = client.StartResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // StartPreparer prepares the Start request. -func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, VMName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmName": autorest.Encode("path", vmName), + "vmName": autorest.Encode("path", VMName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -978,12 +1195,13 @@ func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Respon // StartResponder handles the response to the Start request. The method always // closes the http.Response Body. 
-func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go old mode 100644 new mode 100755 index 648e9fa4aec5..53700c8dd9e7 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -43,44 +43,43 @@ func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID } // CreateOrUpdate create or update a VM scale set. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. name is the name of -// the VM scale set to create or update. parameters is the scale set object. 
-func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil}}}, - }}, - }}, - {Target: "parameters.VirtualMachineScaleSetProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, 
"compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. name is the name of the +// VM scale set to create or update. parameters is the scale set object. +func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (<-chan VirtualMachineScaleSet, <-chan error) { + resultChan := make(chan VirtualMachineScaleSet, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualMachineScaleSet + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
@@ -91,8 +90,9 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -115,56 +115,71 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Reque // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Deallocate deallocates specific virtual machines in a VM scale set. Shuts // down the virtual machines and releases the compute resources. You are not // billed for the compute resources that this virtual machine scale set -// deallocates. This method may poll for completion. Polling can be canceled -// by passing the cancel channel argument. The channel will be used to cancel +// deallocates. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. 
VMInstanceIDs is a list of virtual machine // instance IDs from the VM scale set. -func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request") - } - - resp, err := client.DeallocateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure sending request") - } - - result, err = client.DeallocateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeallocatePreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request") + return + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure sending request") + 
return + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeallocatePreparer prepares the Deallocate request. -func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -173,9 +188,9 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters), autorest.WithQueryParameters(queryParameters)) - if vmInstanceIDs != nil { + if VMInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(vmInstanceIDs)) + autorest.WithJSON(VMInstanceIDs)) } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -190,52 +205,67 @@ func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) // DeallocateResponder handles the response to the 
Deallocate request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes a VM scale set. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will -// be used to cancel polling and any outstanding HTTP requests. +// can be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. 
-func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, VMScaleSetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer 
prepares the Delete request. -func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -256,13 +286,14 @@ func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*ht // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -271,45 +302,62 @@ func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) // argument. The channel will be used to cancel polling and any outstanding // HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. 
vmInstanceIDs is a list of virtual machine +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. VMInstanceIDs is a list of virtual machine // instance IDs from the VM scale set. -func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ - {TargetValue: vmInstanceIDs, - Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances") - } - - req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request") - } - - resp, err := client.DeleteInstancesSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure sending request") - } - - result, err = client.DeleteInstancesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure responding to request") - } - - return + {TargetValue: VMInstanceIDs, + Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", 
Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeleteInstancesPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteInstancesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure sending request") + return + } + + result, err = client.DeleteInstancesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeleteInstancesPreparer prepares the DeleteInstances request. 
-func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -317,7 +365,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGrou autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters), - autorest.WithJSON(vmInstanceIDs), + autorest.WithJSON(VMInstanceIDs), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -332,30 +380,33 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Requ // DeleteInstancesResponder handles the response to the DeleteInstances request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Get display information about a virtual machine scale set. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. -func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, err error) { - req, err := client.GetPreparer(resourceGroupName, vmScaleSetName) +func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSet, err error) { + req, err := client.GetPreparer(resourceGroupName, VMScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -367,15 +418,16 @@ func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, 
vmScal } // GetPreparer prepares the Get request. -func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, VMScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -407,18 +459,20 @@ func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (r // GetInstanceView gets the status of a VM scale set instance. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. 
-func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { - req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName) +func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, VMScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request") + return } resp, err := client.GetInstanceViewSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request") + return } result, err = client.GetInstanceViewResponder(resp) @@ -430,15 +484,16 @@ func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName st } // GetInstanceViewPreparer prepares the GetInstanceView request. 
-func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, VMScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -474,13 +529,15 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http. func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -498,8 +555,9 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := 
map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -554,19 +612,21 @@ func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualM } // ListAll gets a list of all VM Scale Sets in the subscription, regardless of -// the associated resource group. Use nextLink property in the response to -// get the next page of VM Scale Sets. Do this till nextLink is not null to -// fetch all the VM Scale Sets. +// the associated resource group. Use nextLink property in the response to get +// the next page of VM Scale Sets. Do this till nextLink is not null to fetch +// all the VM Scale Sets. func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -583,8 +643,9 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, er "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -641,18 +702,20 @@ func (client VirtualMachineScaleSetsClient) 
ListAllNextResults(lastResults Virtu // ListSkus gets a list of SKUs available for your VM scale set, including the // minimum and maximum VM instances allowed for each SKU. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. -func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) { - req, err := client.ListSkusPreparer(resourceGroupName, vmScaleSetName) +func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) { + req, err := client.ListSkusPreparer(resourceGroupName, VMScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request") + return } resp, err := client.ListSkusSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request") + return } result, err = client.ListSkusResponder(resp) @@ -664,15 +727,16 @@ func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, v } // ListSkusPreparer prepares the ListSkus request. 
-func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, VMScaleSetName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -729,43 +793,57 @@ func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults Virt // PowerOff power off (stop) one or more virtual machines in a VM scale set. // Note that resources are still attached and you are getting charged for the // resources. Instead, use deallocate to release resources and avoid charges. -// This method may poll for completion. Polling can be canceled by passing -// the cancel channel argument. The channel will be used to cancel polling -// and any outstanding HTTP requests. +// This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. VMInstanceIDs is a list of virtual machine // instance IDs from the VM scale set. 
-func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request") - } - - resp, err := client.PowerOffSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure sending request") - } - - result, err = client.PowerOffResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.PowerOffPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request") + return + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure sending request") + return + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // PowerOffPreparer prepares the PowerOff request. -func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -774,9 +852,9 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName s autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters), autorest.WithQueryParameters(queryParameters)) - if vmInstanceIDs != nil { + if VMInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(vmInstanceIDs)) + autorest.WithJSON(VMInstanceIDs)) } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -791,53 +869,68 @@ func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (* // PowerOffResponder handles the response to the PowerOff request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Reimage reimages (upgrade the operating system) one or more virtual -// machines in a VM scale set. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will -// be used to cancel polling and any outstanding HTTP requests. +// Reimage reimages (upgrade the operating system) one or more virtual machines +// in a VM scale set. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. 
-func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") - } - - resp, err := client.ReimageSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure sending request") - } - - result, err = client.ReimageResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ReimagePreparer(resourceGroupName, VMScaleSetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") + return + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure sending request") + return + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // 
ReimagePreparer prepares the Reimage request. -func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -858,54 +951,152 @@ func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*h // ReimageResponder handles the response to the Reimage request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Restart restarts one or more virtual machines in a VM scale set. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and -// any outstanding HTTP requests. +// ReimageAll reimages all the disks ( including data disks ) in the virtual +// machines in a virtual machine scale set. 
This operation is only supported +// for managed disks. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. vmInstanceIDs is a list of virtual machine -// instance IDs from the VM scale set. -func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request") - } +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. 
+func (client VirtualMachineScaleSetsClient) ReimageAll(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ReimageAllPreparer(resourceGroupName, VMScaleSetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", nil, "Failure preparing request") + return + } + + resp, err := client.ReimageAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", resp, "Failure sending request") + return + } + + result, err = client.ReimageAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} - resp, err := client.RestartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure sending request") +// ReimageAllPreparer prepares the ReimageAll request. 
+func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(resourceGroupName string, VMScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - result, err = client.RestartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure responding to request") + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, } + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimageall", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageAllSender sends the ReimageAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ReimageAllResponder handles the response to the ReimageAll request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} return } +// Restart restarts one or more virtual machines in a VM scale set. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. VMInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RestartPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request") + return + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure sending request") + return + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, 
"Failure responding to request") + } + }() + return resultChan, errChan +} + // RestartPreparer prepares the Restart request. -func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -914,9 +1105,9 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName st autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters), autorest.WithQueryParameters(queryParameters)) - if vmInstanceIDs != nil { + if VMInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(vmInstanceIDs)) + autorest.WithJSON(VMInstanceIDs)) } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -931,54 +1122,69 @@ func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*h // RestartResponder handles the response to the Restart request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Start starts one or more virtual machines in a VM scale set. This method -// may poll for completion. Polling can be canceled by passing the cancel -// channel argument. The channel will be used to cancel polling and any -// outstanding HTTP requests. +// Start starts one or more virtual machines in a VM scale set. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. VMInstanceIDs is a list of virtual machine // instance IDs from the VM scale set. 
-func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request") - } - - resp, err := client.StartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure sending request") - } - - result, err = client.StartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StartPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request") + return + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure sending request") + return + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // StartPreparer prepares the Start request. -func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -987,9 +1193,9 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName stri autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters), autorest.WithQueryParameters(queryParameters)) - if vmInstanceIDs != nil { + if VMInstanceIDs != nil { preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(vmInstanceIDs)) + autorest.WithJSON(VMInstanceIDs)) } return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -1004,60 +1210,78 @@ func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*htt // StartResponder handles the response to the Start request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // UpdateInstances upgrades one or more virtual machines to the latest SKU set -// in the VM scale set model. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will -// be used to cancel polling and any outstanding HTTP requests. +// in the VM scale set model. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the -// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. VMInstanceIDs is a list of virtual machine // instance IDs from the VM scale set. 
-func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ - {TargetValue: vmInstanceIDs, - Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances") - } - - req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request") - } - - resp, err := client.UpdateInstancesSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure sending request") - } - - result, err = client.UpdateInstancesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure responding to request") - } - - return + {TargetValue: VMInstanceIDs, + Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances") + close(errChan) + close(resultChan) + return 
resultChan, errChan + } + + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdateInstancesPreparer(resourceGroupName, VMScaleSetName, VMInstanceIDs, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateInstancesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure sending request") + return + } + + result, err = client.UpdateInstancesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // UpdateInstancesPreparer prepares the UpdateInstances request. 
-func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -1065,7 +1289,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGrou autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters), - autorest.WithJSON(vmInstanceIDs), + autorest.WithJSON(VMInstanceIDs), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{Cancel: cancel}) } @@ -1080,12 +1304,13 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Requ // UpdateInstancesResponder handles the response to the UpdateInstances request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go old mode 100644 new mode 100755 index f0b309510e63..34e0934dcb68 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -42,46 +42,60 @@ func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionI } // Deallocate deallocates a specific virtual machine in a VM scale set. Shuts -// down the virtual machine and releases the compute resources it uses. You -// are not billed for the compute resources of this virtual machine once it -// is deallocated. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// down the virtual machine and releases the compute resources it uses. 
You are +// not billed for the compute resources of this virtual machine once it is +// deallocated. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. -func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request") - } - - resp, err := client.DeallocateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure sending request") - } - - result, err = client.DeallocateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeallocatePreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { 
+ err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request") + return + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure sending request") + return + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeallocatePreparer prepares the Deallocate request. -func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -102,13 +116,14 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request // DeallocateResponder handles the response to the Deallocate request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -117,40 +132,54 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Res // argument. The channel will be used to cancel polling and any outstanding // HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. -func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) 
(<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
-func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -171,31 +200,34 @@ func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (* // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Get gets a virtual machine from a VM scale set. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. 
-func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { - req, err := client.GetPreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { + req, err := client.GetPreparer(resourceGroupName, VMScaleSetName, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -207,16 +239,17 @@ func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmSc } // GetPreparer prepares the Get request. 
-func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -248,19 +281,21 @@ func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) // GetInstanceView gets the status of a virtual machine from a VM scale set. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. 
-func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) { - req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName, instanceID) +func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, VMScaleSetName, instanceID) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request") + return } resp, err := client.GetInstanceViewSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request") + return } result, err = client.GetInstanceViewResponder(resp) @@ -272,16 +307,17 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName } // GetInstanceViewPreparer prepares the GetInstanceView request. 
-func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -320,13 +356,15 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *htt func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request") + return } result, err = 
client.ListResponder(resp) @@ -345,8 +383,9 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName str "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) @@ -410,46 +449,60 @@ func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults Virtua } // PowerOff power off (stop) a virtual machine in a VM scale set. Note that -// resources are still attached and you are getting charged for the -// resources. Instead, use deallocate to release resources and avoid charges. -// This method may poll for completion. Polling can be canceled by passing -// the cancel channel argument. The channel will be used to cancel polling -// and any outstanding HTTP requests. +// resources are still attached and you are getting charged for the resources. +// Instead, use deallocate to release resources and avoid charges. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. 
-func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request") - } - - resp, err := client.PowerOffSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure sending request") - } - - result, err = client.PowerOffResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.PowerOffPreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request") + return + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure sending request") + return + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", 
"PowerOff", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // PowerOffPreparer prepares the PowerOff request. -func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -470,55 +523,70 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) // PowerOffResponder handles the response to the PowerOff request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Reimage reimages (upgrade the operating system) a specific virtual machine // in a VM scale set. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. 
The channel will be used -// to cancel polling and any outstanding HTTP requests. +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. -func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request") - } - - resp, err := client.ReimageSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure sending request") - } - - result, err = client.ReimageResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ReimagePreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", 
nil, "Failure preparing request") + return + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure sending request") + return + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ReimagePreparer prepares the Reimage request. -func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -539,55 +607,155 @@ func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) ( // ReimageResponder handles the response to the Reimage request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Restart restarts a virtual machine in a VM scale set. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// ReimageAll allows you to re-image all the disks ( including data disks ) in +// the a virtual machine scale set instance. This operation is only supported +// for managed disks. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. 
-func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request") +func (client VirtualMachineScaleSetVMsClient) ReimageAll(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ReimageAllPreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", nil, "Failure preparing request") + return + } + + resp, err := client.ReimageAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", resp, "Failure sending request") + return + } + + result, err = client.ReimageAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// ReimageAllPreparer prepares the ReimageAll request. 
+func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - resp, err := client.RestartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure sending request") + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, } - result, err = client.RestartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure responding to request") - } + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageAllSender sends the ReimageAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} +// ReimageAllResponder handles the response to the ReimageAll request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} return } +// Restart restarts a virtual machine in a VM scale set. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. +func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RestartPreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request") + return + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure sending request") + return + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure responding to request") + } + }() + return resultChan, 
errChan +} + // RestartPreparer prepares the Restart request. -func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -608,55 +776,70 @@ func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) ( // RestartResponder handles the response to the Restart request. The method always // closes the http.Response Body. -func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Start starts a virtual machine in a VM scale set. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. 
Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. vmScaleSetName is the +// resourceGroupName is the name of the resource group. VMScaleSetName is the // name of the VM scale set. instanceID is the instance ID of the virtual // machine. -func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request") - } - - resp, err := client.StartSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure sending request") - } - - result, err = client.StartResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure responding to request") - } - - return +func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StartPreparer(resourceGroupName, VMScaleSetName, instanceID, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request") + return + } + + resp, err := client.StartSender(req) + 
if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure sending request") + return + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // StartPreparer prepares the Start request. -func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, VMScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), - "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -677,12 +860,13 @@ func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*h // StartResponder handles the response to the Start request. The method always // closes the http.Response Body. 
-func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go old mode 100644 new mode 100755 index 507e9f157ec4..c76b203e7e9d --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -14,7 +14,7 @@ package compute // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -55,13 +55,15 @@ func (client VirtualMachineSizesClient) List(location string) (result VirtualMac req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -79,8 +81,9 @@ func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Req "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-04-30-preview" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go old mode 100644 new mode 100755 index 2257c451fb11..e5e99db676c3 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go @@ -1,5 +1,6 @@ -// Package containerregistry implements the Azure ARM Containerregistry -// service API version 2016-06-27-preview. +// Package containerregistry implements the Azure ARM Containerregistry service +// API version 2017-03-01. +// // package containerregistry @@ -17,7 +18,7 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -26,9 +27,6 @@ import ( ) const ( - // APIVersion is the version of the Containerregistry - APIVersion = "2016-06-27-preview" - // DefaultBaseURI is the default URI used for the service Containerregistry DefaultBaseURI = "https://management.azure.com" ) @@ -37,7 +35,6 @@ const ( type ManagementClient struct { autorest.Client BaseURI string - APIVersion string SubscriptionID string } @@ -51,7 +48,6 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, - APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go old mode 100644 new mode 100755 index ecf1ca8f648e..66edd68c5694 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go @@ -14,7 +14,7 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,6 +25,75 @@ import ( "net/http" ) +// PasswordName enumerates the values for password name. +type PasswordName string + +const ( + // Password specifies the password state for password name. + Password PasswordName = "password" + // Password2 specifies the password 2 state for password name. 
+ Password2 PasswordName = "password2" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Basic specifies the basic state for sku tier. + Basic SkuTier = "Basic" +) + +// OperationDefinition is the definition of a container registry operation. +type OperationDefinition struct { + Name *string `json:"name,omitempty"` + Display *OperationDisplayDefinition `json:"display,omitempty"` +} + +// OperationDisplayDefinition is the display information for a container +// registry operation. +type OperationDisplayDefinition struct { + Provider *string `json:"provider,omitempty"` + Resource *string `json:"resource,omitempty"` + Operation *string `json:"operation,omitempty"` + Description *string `json:"description,omitempty"` +} + +// OperationListResult is the result of a request to list container registry +// operations. +type OperationListResult struct { + autorest.Response `json:"-"` + Value *[]OperationDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// OperationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client OperationListResult) OperationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RegenerateCredentialParameters is the parameters used to regenerate the +// login credential. 
+type RegenerateCredentialParameters struct { + Name PasswordName `json:"name,omitempty"` +} + // Registry is an object that represents a container registry. type Registry struct { autorest.Response `json:"-"` @@ -33,15 +102,25 @@ type Registry struct { Type *string `json:"type,omitempty"` Location *string `json:"location,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` *RegistryProperties `json:"properties,omitempty"` } -// RegistryCredentials is the result of a request to get the administrator -// login credentials for a container registry. -type RegistryCredentials struct { +// RegistryCreateParameters is the parameters for creating a container +// registry. +type RegistryCreateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + Location *string `json:"location,omitempty"` + Sku *Sku `json:"sku,omitempty"` + *RegistryPropertiesCreateParameters `json:"properties,omitempty"` +} + +// RegistryListCredentialsResult is the response from the ListCredentials +// operation. +type RegistryListCredentialsResult struct { autorest.Response `json:"-"` - Username *string `json:"username,omitempty"` - Password *string `json:"password,omitempty"` + Username *string `json:"username,omitempty"` + Passwords *[]RegistryPassword `json:"passwords,omitempty"` } // RegistryListResult is the result of a request to list container registries. @@ -63,15 +142,15 @@ func (client RegistryListResult) RegistryListResultPreparer() (*http.Request, er autorest.WithBaseURL(to.String(client.NextLink))) } -// RegistryNameCheckRequest is a request to check whether the container -// registry name is available. +// RegistryNameCheckRequest is a request to check whether a container registry +// name is available. type RegistryNameCheckRequest struct { Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` } -// RegistryNameStatus is the result of a request to check the availability of -// a container registry name. 
+// RegistryNameStatus is the result of a request to check the availability of a +// container registry name. type RegistryNameStatus struct { autorest.Response `json:"-"` NameAvailable *bool `json:"nameAvailable,omitempty"` @@ -79,19 +158,33 @@ type RegistryNameStatus struct { Message *string `json:"message,omitempty"` } +// RegistryPassword is the login password for the container registry. +type RegistryPassword struct { + Name PasswordName `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + // RegistryProperties is the properties of a container registry. type RegistryProperties struct { - LoginServer *string `json:"loginServer,omitempty"` - CreationDate *date.Time `json:"creationDate,omitempty"` + LoginServer *string `json:"loginServer,omitempty"` + CreationDate *date.Time `json:"creationDate,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` + StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` +} + +// RegistryPropertiesCreateParameters is the parameters for creating the +// properties of a container registry. +type RegistryPropertiesCreateParameters struct { AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` - StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` + StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty"` } // RegistryPropertiesUpdateParameters is the parameters for updating the // properties of a container registry. 
type RegistryPropertiesUpdateParameters struct { AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` - StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` + StorageAccount *StorageAccountParameters `json:"storageAccount,omitempty"` } // RegistryUpdateParameters is the parameters for updating a container @@ -110,9 +203,21 @@ type Resource struct { Tags *map[string]*string `json:"tags,omitempty"` } -// StorageAccountProperties is the properties of a storage account for a +// Sku is the SKU of a container registry. +type Sku struct { + Name *string `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` +} + +// StorageAccountParameters is the parameters of a storage account for a // container registry. -type StorageAccountProperties struct { +type StorageAccountParameters struct { Name *string `json:"name,omitempty"` AccessKey *string `json:"accessKey,omitempty"` } + +// StorageAccountProperties is the properties of a storage account for a +// container registry. +type StorageAccountProperties struct { + Name *string `json:"name,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go new file mode 100755 index 000000000000..a1694180c5fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go @@ -0,0 +1,124 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// OperationsClient is the client for the Operations methods of the +// Containerregistry service. +type OperationsClient struct { + ManagementClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient +// client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available Azure Container Registry REST API +// operations. +func (client OperationsClient) List() (result OperationListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client OperationsClient) ListPreparer() (*http.Request, error) { + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.ContainerRegistry/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client OperationsClient) ListNextResults(lastResults OperationListResult) (result OperationListResult, err error) { + req, err := lastResults.OperationListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.OperationsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go old mode 100644 new mode 100755 index c658cccf580b..fe03ba33818e --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go @@ -14,7 +14,7 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -51,20 +51,26 @@ func NewRegistriesClientWithBaseURI(baseURI string, subscriptionID string) Regis func (client RegistriesClient) CheckNameAvailability(registryNameCheckRequest RegistryNameCheckRequest) (result RegistryNameStatus, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: registryNameCheckRequest, - Constraints: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.Null, Rule: true, Chain: nil}, + Constraints: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryNameCheckRequest.Name", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryNameCheckRequest.Name", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}, + }}, {Target: "registryNameCheckRequest.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "CheckNameAvailability") } req, err := client.CheckNameAvailabilityPreparer(registryNameCheckRequest) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", nil, "Failure preparing request") + return } resp, err := client.CheckNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure sending request") + return } result, err = 
client.CheckNameAvailabilityResponder(resp) @@ -81,8 +87,9 @@ func (client RegistriesClient) CheckNameAvailabilityPreparer(registryNameCheckRe "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -114,55 +121,80 @@ func (client RegistriesClient) CheckNameAvailabilityResponder(resp *http.Respons return } -// CreateOrUpdate creates or updates a container registry with the specified -// parameters. +// Create creates a container registry with the specified parameters. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group to which the container // registry belongs. registryName is the name of the container registry. -// registry is the parameters for creating or updating a container registry. -func (client RegistriesClient) CreateOrUpdate(resourceGroupName string, registryName string, registry Registry) (result Registry, err error) { +// registryCreateParameters is the parameters for creating a container +// registry. 
+func (client RegistriesClient) Create(resourceGroupName string, registryName string, registryCreateParameters RegistryCreateParameters, cancel <-chan struct{}) (<-chan Registry, <-chan error) { + resultChan := make(chan Registry, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ - {TargetValue: registry, - Constraints: []validation.Constraint{{Target: "registry.RegistryProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "registry.RegistryProperties.StorageAccount.AccessKey", Name: validation.Null, Rule: true, Chain: nil}, - }}, - {Target: "registry.RegistryProperties.LoginServer", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "registry.RegistryProperties.CreationDate", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, registryName, registry) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return -} - -// CreateOrUpdatePreparer prepares the 
CreateOrUpdate request. -func (client RegistriesClient) CreateOrUpdatePreparer(resourceGroupName string, registryName string, registry Registry) (*http.Request, error) { + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}, + {TargetValue: registryCreateParameters, + Constraints: []validation.Constraint{{Target: "registryCreateParameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registryCreateParameters.Sku", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "registryCreateParameters.Sku.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "registryCreateParameters.RegistryPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registryCreateParameters.RegistryPropertiesCreateParameters.StorageAccount.AccessKey", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Registry + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, registryName, registryCreateParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, 
"containerregistry.RegistriesClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Create", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreatePreparer prepares the Create request. +func (client RegistriesClient) CreatePreparer(resourceGroupName string, registryName string, registryCreateParameters RegistryCreateParameters, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -170,24 +202,26 @@ func (client RegistriesClient) CreateOrUpdatePreparer(resourceGroupName string, autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), - autorest.WithJSON(registry), + autorest.WithJSON(registryCreateParameters), autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) + return preparer.Prepare(&http.Request{Cancel: cancel}) } -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. 
-func (client RegistriesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) +func (client RegistriesClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) } -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. -func (client RegistriesClient) CreateOrUpdateResponder(resp *http.Response) (result Registry, err error) { +func (client RegistriesClient) CreateResponder(resp *http.Response) (result Registry, err error) { err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} @@ -199,15 +233,25 @@ func (client RegistriesClient) CreateOrUpdateResponder(resp *http.Response) (res // resourceGroupName is the name of the resource group to which the container // registry belongs. registryName is the name of the container registry. 
func (client RegistriesClient) Delete(resourceGroupName string, registryName string) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Delete") + } + req, err := client.DeletePreparer(resourceGroupName, registryName) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") + return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure sending request") + return } result, err = client.DeleteResponder(resp) @@ -226,8 +270,9 @@ func (client RegistriesClient) DeletePreparer(resourceGroupName string, registry "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -250,110 +295,57 @@ func (client RegistriesClient) DeleteResponder(resp *http.Response) (result auto err = autorest.Respond( resp, client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), 
autorest.ByClosing()) result.Response = resp return } -// GetCredentials gets the administrator login credentials for the specified -// container registry. +// Get gets the properties of the specified container registry. // // resourceGroupName is the name of the resource group to which the container // registry belongs. registryName is the name of the container registry. -func (client RegistriesClient) GetCredentials(resourceGroupName string, registryName string) (result RegistryCredentials, err error) { - req, err := client.GetCredentialsPreparer(resourceGroupName, registryName) - if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", nil, "Failure preparing request") - } - - resp, err := client.GetCredentialsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", resp, "Failure sending request") - } - - result, err = client.GetCredentialsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", resp, "Failure responding to request") - } - - return -} - -// GetCredentialsPreparer prepares the GetCredentials request. 
-func (client RegistriesClient) GetCredentialsPreparer(resourceGroupName string, registryName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "registryName": autorest.Encode("path", registryName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, +func (client RegistriesClient) Get(resourceGroupName string, registryName string) (result Registry, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Get") } - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/getCredentials", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare(&http.Request{}) -} - -// GetCredentialsSender sends the GetCredentials request. The method will close the -// http.Response Body if it receives an error. -func (client RegistriesClient) GetCredentialsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) -} - -// GetCredentialsResponder handles the response to the GetCredentials request. The method always -// closes the http.Response Body. 
-func (client RegistriesClient) GetCredentialsResponder(resp *http.Response) (result RegistryCredentials, err error) { - err = autorest.Respond( - resp, - client.ByInspecting(), - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetProperties gets the properties of the specified container registry. -// -// resourceGroupName is the name of the resource group to which the container -// registry belongs. registryName is the name of the container registry. -func (client RegistriesClient) GetProperties(resourceGroupName string, registryName string) (result Registry, err error) { - req, err := client.GetPropertiesPreparer(resourceGroupName, registryName) + req, err := client.GetPreparer(resourceGroupName, registryName) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", nil, "Failure preparing request") + return } - resp, err := client.GetPropertiesSender(req) + resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", resp, "Failure sending request") + return } - result, err = client.GetPropertiesResponder(resp) + result, err = client.GetResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Get", resp, "Failure responding to request") } return } -// GetPropertiesPreparer prepares the GetProperties 
request. -func (client RegistriesClient) GetPropertiesPreparer(resourceGroupName string, registryName string) (*http.Request, error) { +// GetPreparer prepares the Get request. +func (client RegistriesClient) GetPreparer(resourceGroupName string, registryName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -364,15 +356,15 @@ func (client RegistriesClient) GetPropertiesPreparer(resourceGroupName string, r return preparer.Prepare(&http.Request{}) } -// GetPropertiesSender sends the GetProperties request. The method will close the +// GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. -func (client RegistriesClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { +func (client RegistriesClient) GetSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req) } -// GetPropertiesResponder handles the response to the GetProperties request. The method always +// GetResponder handles the response to the Get request. The method always // closes the http.Response Body. -func (client RegistriesClient) GetPropertiesResponder(resp *http.Response) (result Registry, err error) { +func (client RegistriesClient) GetResponder(resp *http.Response) (result Registry, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -383,18 +375,19 @@ func (client RegistriesClient) GetPropertiesResponder(resp *http.Response) (resu return } -// List lists all the available container registries under the specified -// subscription. 
+// List lists all the container registries under the specified subscription. func (client RegistriesClient) List() (result RegistryListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -411,8 +404,9 @@ func (client RegistriesClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -466,21 +460,23 @@ func (client RegistriesClient) ListNextResults(lastResults RegistryListResult) ( return } -// ListByResourceGroup lists all the available container registries under the -// specified resource group. +// ListByResourceGroup lists all the container registries under the specified +// resource group. // // resourceGroupName is the name of the resource group to which the container // registry belongs. 
func (client RegistriesClient) ListByResourceGroup(resourceGroupName string) (result RegistryListResult, err error) { req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", nil, "Failure preparing request") + return } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure sending request") + return } result, err = client.ListByResourceGroupResponder(resp) @@ -498,8 +494,9 @@ func (client RegistriesClient) ListByResourceGroupPreparer(resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -553,60 +550,150 @@ func (client RegistriesClient) ListByResourceGroupNextResults(lastResults Regist return } -// RegenerateCredentials regenerates the administrator login credentials for -// the specified container registry. +// ListCredentials lists the login credentials for the specified container +// registry. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. 
+func (client RegistriesClient) ListCredentials(resourceGroupName string, registryName string) (result RegistryListCredentialsResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "ListCredentials") + } + + req, err := client.ListCredentialsPreparer(resourceGroupName, registryName) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", nil, "Failure preparing request") + return + } + + resp, err := client.ListCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", resp, "Failure sending request") + return + } + + result, err = client.ListCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListCredentials", resp, "Failure responding to request") + } + + return +} + +// ListCredentialsPreparer prepares the ListCredentials request. 
+func (client RegistriesClient) ListCredentialsPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListCredentialsSender sends the ListCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListCredentialsResponder handles the response to the ListCredentials request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListCredentialsResponder(resp *http.Response) (result RegistryListCredentialsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateCredential regenerates one of the login credentials for the +// specified container registry. // // resourceGroupName is the name of the resource group to which the container // registry belongs. registryName is the name of the container registry. 
-func (client RegistriesClient) RegenerateCredentials(resourceGroupName string, registryName string) (result RegistryCredentials, err error) { - req, err := client.RegenerateCredentialsPreparer(resourceGroupName, registryName) +// regenerateCredentialParameters is specifies name of the password which +// should be regenerated -- password or password2. +func (client RegistriesClient) RegenerateCredential(resourceGroupName string, registryName string, regenerateCredentialParameters RegenerateCredentialParameters) (result RegistryListCredentialsResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "RegenerateCredential") + } + + req, err := client.RegenerateCredentialPreparer(resourceGroupName, registryName, regenerateCredentialParameters) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", nil, "Failure preparing request") + return } - resp, err := client.RegenerateCredentialsSender(req) + resp, err := client.RegenerateCredentialSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", resp, "Failure sending request") + return } - result, err = 
client.RegenerateCredentialsResponder(resp) + result, err = client.RegenerateCredentialResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredential", resp, "Failure responding to request") } return } -// RegenerateCredentialsPreparer prepares the RegenerateCredentials request. -func (client RegistriesClient) RegenerateCredentialsPreparer(resourceGroupName string, registryName string) (*http.Request, error) { +// RegenerateCredentialPreparer prepares the RegenerateCredential request. +func (client RegistriesClient) RegenerateCredentialPreparer(resourceGroupName string, registryName string, regenerateCredentialParameters RegenerateCredentialParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ "registryName": autorest.Encode("path", registryName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( + autorest.AsJSON(), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/regenerateCredentials", pathParameters), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/regenerateCredential", pathParameters), + autorest.WithJSON(regenerateCredentialParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare(&http.Request{}) } -// RegenerateCredentialsSender sends the RegenerateCredentials 
request. The method will close the +// RegenerateCredentialSender sends the RegenerateCredential request. The method will close the // http.Response Body if it receives an error. -func (client RegistriesClient) RegenerateCredentialsSender(req *http.Request) (*http.Response, error) { +func (client RegistriesClient) RegenerateCredentialSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req) } -// RegenerateCredentialsResponder handles the response to the RegenerateCredentials request. The method always +// RegenerateCredentialResponder handles the response to the RegenerateCredential request. The method always // closes the http.Response Body. -func (client RegistriesClient) RegenerateCredentialsResponder(resp *http.Response) (result RegistryCredentials, err error) { +func (client RegistriesClient) RegenerateCredentialResponder(resp *http.Response) (result RegistryListCredentialsResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), @@ -624,15 +711,25 @@ func (client RegistriesClient) RegenerateCredentialsResponder(resp *http.Respons // registryUpdateParameters is the parameters for updating a container // registry. 
func (client RegistriesClient) Update(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (result Registry, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryName, + Constraints: []validation.Constraint{{Target: "registryName", Name: validation.MaxLength, Rule: 50, Chain: nil}, + {Target: "registryName", Name: validation.MinLength, Rule: 5, Chain: nil}, + {Target: "registryName", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]*$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "Update") + } + req, err := client.UpdatePreparer(resourceGroupName, registryName, registryUpdateParameters) if err != nil { - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") + return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure sending request") + return } result, err = client.UpdateResponder(resp) @@ -651,8 +748,9 @@ func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registry "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go old mode 100644 new mode 100755 
index e0d70c1b510c..e586a01ce796 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go @@ -14,30 +14,16 @@ package containerregistry // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. -import ( - "fmt" -) - -const ( - major = "7" - minor = "0" - patch = "1" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" -) - // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "containerregistry", "2016-06-27-preview") + return "Azure-SDK-For-Go/v10.0.2-beta arm-containerregistry/2017-03-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + return "v10.0.2-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go new file mode 100755 index 000000000000..8bab7acc1324 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go @@ -0,0 +1,53 @@ +// Package disk implements the Azure ARM Disk service API version +// 2016-04-30-preview. +// +// The Disk Resource Provider Client. +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Disk + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Disk. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go new file mode 100755 index 000000000000..4f7fce74fc48 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go @@ -0,0 +1,728 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// DisksClient is the the Disk Resource Provider Client. +type DisksClient struct { + ManagementClient +} + +// NewDisksClient creates an instance of the DisksClient client. +func NewDisksClient(subscriptionID string) DisksClient { + return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDisksClientWithBaseURI creates an instance of the DisksClient client. +func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient { + return DisksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a disk. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. diskParameter is +// disk object supplied in the body of the Put disk operation. 
+func (client DisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (<-chan Model, <-chan error) { + resultChan := make(chan Model, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: diskParameter, + Constraints: []validation.Constraint{{Target: "diskParameter.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "diskParameter.Properties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go 
func() { + var err error + var result Model + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, diskName, diskParameter, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(diskParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. 
+func (client DisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client DisksClient) DeletePreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. 
+func (client DisksClient) Get(resourceGroupName string, diskName string) (result Model, err error) { + req, err := client.GetPreparer(resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client DisksClient) GetResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. grantAccessData +// is access data object supplied in the body of the get disk access operation. +func (client DisksClient) GrantAccess(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, diskName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = 
autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. 
+func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disks under a subscription. +func (client DisksClient) List() (result ListType, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DisksClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DisksClient) ListResponder(resp *http.Response) (result ListType, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client DisksClient) ListNextResults(lastResults ListType) (result ListType, err error) { + req, err := lastResults.ListTypePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup lists all the disks under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client DisksClient) ListByResourceGroup(resourceGroupName string) (result ListType, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result ListType, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client DisksClient) ListByResourceGroupNextResults(lastResults ListType) (result ListType, err error) { + req, err := lastResults.ListTypePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// RevokeAccess revokes access to a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. 
+func (client DisksClient) RevokeAccess(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
diskName is the name of +// the disk within the given subscription and resource group. diskParameter is +// disk object supplied in the body of the Patch disk operation. +func (client DisksClient) Update(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (<-chan Model, <-chan error) { + resultChan := make(chan Model, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Model + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, diskName, diskParameter, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(diskParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client DisksClient) UpdateResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go new file mode 100755 index 000000000000..e8118696ad5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go @@ -0,0 +1,278 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None specifies the none state for access level. + None AccessLevel = "None" + // Read specifies the read state for access level. + Read AccessLevel = "Read" +) + +// CreateOption enumerates the values for create option. 
+type CreateOption string + +const ( + // Attach specifies the attach state for create option. + Attach CreateOption = "Attach" + // Copy specifies the copy state for create option. + Copy CreateOption = "Copy" + // Empty specifies the empty state for create option. + Empty CreateOption = "Empty" + // FromImage specifies the from image state for create option. + FromImage CreateOption = "FromImage" + // Import specifies the import state for create option. + Import CreateOption = "Import" + // Restore specifies the restore state for create option. + Restore CreateOption = "Restore" +) + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux specifies the linux state for operating system types. + Linux OperatingSystemTypes = "Linux" + // Windows specifies the windows state for operating system types. + Windows OperatingSystemTypes = "Windows" +) + +// StorageAccountTypes enumerates the values for storage account types. +type StorageAccountTypes string + +const ( + // PremiumLRS specifies the premium lrs state for storage account types. + PremiumLRS StorageAccountTypes = "Premium_LRS" + // StandardLRS specifies the standard lrs state for storage account types. + StandardLRS StorageAccountTypes = "Standard_LRS" +) + +// AccessURI is a disk access SAS uri. +type AccessURI struct { + autorest.Response `json:"-"` + *AccessURIOutput `json:"properties,omitempty"` +} + +// AccessURIOutput is azure properties, including output. +type AccessURIOutput struct { + *AccessURIRaw `json:"output,omitempty"` +} + +// AccessURIRaw is this object gets 'bubbled up' through flattening. +type AccessURIRaw struct { + AccessSAS *string `json:"accessSAS,omitempty"` +} + +// APIError is api error. 
+type APIError struct { + Details *[]APIErrorBase `json:"details,omitempty"` + Innererror *InnerError `json:"innererror,omitempty"` + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// APIErrorBase is api error base. +type APIErrorBase struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// CreationData is data used when creating a disk. +type CreationData struct { + CreateOption CreateOption `json:"createOption,omitempty"` + StorageAccountID *string `json:"storageAccountId,omitempty"` + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + SourceURI *string `json:"sourceUri,omitempty"` + SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + +// EncryptionSettings is encryption settings for disk or snapshot +type EncryptionSettings struct { + Enabled *bool `json:"enabled,omitempty"` + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GrantAccessData is data used for requesting a SAS. +type GrantAccessData struct { + Access AccessLevel `json:"access,omitempty"` + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + +// ImageDiskReference is the source image used for creating the disk. +type ImageDiskReference struct { + ID *string `json:"id,omitempty"` + Lun *int32 `json:"lun,omitempty"` +} + +// InnerError is inner error details. 
+type InnerError struct { + Exceptiontype *string `json:"exceptiontype,omitempty"` + Errordetail *string `json:"errordetail,omitempty"` +} + +// KeyVaultAndKeyReference is key Vault Key Url and vault id of KeK, KeK is +// optional and when provided is used to unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference is key Vault Secret Url and vault id of the +// encryption key +type KeyVaultAndSecretReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + SecretURL *string `json:"secretUrl,omitempty"` +} + +// ListType is the List Disks operation response. +type ListType struct { + autorest.Response `json:"-"` + Value *[]Model `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListTypePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListType) ListTypePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Model is disk resource. 
+type Model struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *Properties `json:"properties,omitempty"` +} + +// OperationStatusResponse is operation status response +type OperationStatusResponse struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Error *APIError `json:"error,omitempty"` +} + +// Properties is disk resource properties. +type Properties struct { + AccountType StorageAccountTypes `json:"accountType,omitempty"` + TimeCreated *date.Time `json:"timeCreated,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Resource is the Resource model definition. +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceUpdate is the Resource model definition. +type ResourceUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Snapshot is snapshot resource. 
+type Snapshot struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *Properties `json:"properties,omitempty"` +} + +// SnapshotList is the List Snapshots operation response. +type SnapshotList struct { + autorest.Response `json:"-"` + Value *[]Snapshot `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SnapshotListPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SnapshotList) SnapshotListPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SnapshotUpdate is snapshot update resource. +type SnapshotUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` + *UpdateProperties `json:"properties,omitempty"` +} + +// SourceVault is the vault id is an Azure Resource Manager Resoure id in the +// form +// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} +type SourceVault struct { + ID *string `json:"id,omitempty"` +} + +// UpdateProperties is disk resource update properties. +type UpdateProperties struct { + AccountType StorageAccountTypes `json:"accountType,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +} + +// UpdateType is disk update resource. 
+type UpdateType struct { + Tags *map[string]*string `json:"tags,omitempty"` + *UpdateProperties `json:"properties,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go new file mode 100755 index 000000000000..f4e5579d045e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go @@ -0,0 +1,733 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// SnapshotsClient is the the Disk Resource Provider Client. +type SnapshotsClient struct { + ManagementClient +} + +// NewSnapshotsClient creates an instance of the SnapshotsClient client. +func NewSnapshotsClient(subscriptionID string) SnapshotsClient { + return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient +// client. 
+func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { + return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a snapshot. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +// snapshot is snapshot object supplied in the body of the Put disk operation. +func (client SnapshotsClient) CreateOrUpdate(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: snapshot, + Constraints: []validation.Constraint{{Target: "snapshot.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "snapshot.Properties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + 
{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Snapshot + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a snapshot. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. 
The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +func (client SnapshotsClient) Delete(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. 
+func (client SnapshotsClient) Get(resourceGroupName string, snapshotName string) (result Snapshot, err error) { + req, err := client.GetPreparer(resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a snapshot. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +// grantAccessData is access data object supplied in the body of the get +// snapshot access operation. +func (client SnapshotsClient) GrantAccess(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, snapshotName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = 
autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. 
+func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists snapshots under a subscription. +func (client SnapshotsClient) List() (result SnapshotList, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SnapshotsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) ListNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup lists snapshots under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client SnapshotsClient) ListByResourceGroup(resourceGroupName string) (result SnapshotList, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) ListByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// RevokeAccess revokes access to a snapshot. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. 
snapshotName is the +// name of the snapshot within the given subscription and resource group. +func (client SnapshotsClient) RevokeAccess(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
+func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a snapshot. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. 
+// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +// snapshot is snapshot object supplied in the body of the Patch snapshot +// operation. +func (client SnapshotsClient) Update(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Snapshot + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go new file mode 100755 index 000000000000..a78b351a0e36 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go @@ -0,0 +1,29 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/v10.0.2-beta arm-disk/2016-04-30-preview" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return "v10.0.2-beta" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go old mode 100644 new mode 100755 index 872fbbf7c5b3..4ab4e0734905 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// ApplicationGatewaysClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// ApplicationGatewaysClient is the composite Swagger for Network Client type ApplicationGatewaysClient struct { ManagementClient } @@ -48,30 +44,43 @@ func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID stri // BackendHealth gets the backend health of the specified application gateway // in a resource group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. 
expand is expands -// BackendAddressPool and BackendHttpSettings referenced in backend health. -func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.BackendHealthPreparer(resourceGroupName, applicationGatewayName, expand, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", nil, "Failure preparing request") - } - - resp, err := client.BackendHealthSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure sending request") - } - - result, err = client.BackendHealthResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure responding to request") - } - - return +// is the name of the application gateway. expand is expands BackendAddressPool +// and BackendHttpSettings referenced in backend health. 
+func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (<-chan ApplicationGatewayBackendHealth, <-chan error) { + resultChan := make(chan ApplicationGatewayBackendHealth, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ApplicationGatewayBackendHealth + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.BackendHealthPreparer(resourceGroupName, applicationGatewayName, expand, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", nil, "Failure preparing request") + return + } + + resp, err := client.BackendHealthSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure sending request") + return + } + + result, err = client.BackendHealthResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // BackendHealthPreparer prepares the BackendHealth request. @@ -82,8 +91,9 @@ func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -107,52 +117,71 @@ func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) ( // BackendHealthResponder handles the response to the BackendHealth request. The method always // closes the http.Response Body. 
-func (client ApplicationGatewaysClient) BackendHealthResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ApplicationGatewaysClient) BackendHealthResponder(resp *http.Response) (result ApplicationGatewayBackendHealth, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // CreateOrUpdate creates or updates the specified application gateway. This // method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and -// any outstanding HTTP requests. +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. parameters is parameters supplied -// to the create or update application gateway operation. -func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (result autorest.Response, err error) { +// is the name of the application gateway. parameters is parameters supplied to +// the create or update application gateway operation. 
+func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (<-chan ApplicationGateway, <-chan error) { + resultChan := make(chan ApplicationGateway, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.ApplicationGatewayPropertiesFormat.OperationalState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + Chain: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration.RuleSetType", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration.RuleSetVersion", Name: validation.Null, Rule: true, Chain: nil}, + }}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return 
result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result ApplicationGateway + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
@@ -163,8 +192,9 @@ func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -187,41 +217,55 @@ func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified application gateway. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the application gateway. 
-func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure responding to request") - } - - return +func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
@@ -232,8 +276,9 @@ func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -271,13 +316,15 @@ func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (re func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) { req, err := client.GetPreparer(resourceGroupName, applicationGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -296,8 +343,9 @@ func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, ap "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -333,13 +381,15 @@ func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (resul func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, 
autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -357,8 +407,9 @@ func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) ( "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -416,13 +467,15 @@ func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationG func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -439,8 +492,9 @@ func (client ApplicationGatewaysClient) 
ListAllPreparer() (*http.Request, error) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -494,33 +548,108 @@ func (client ApplicationGatewaysClient) ListAllNextResults(lastResults Applicati return } -// Start starts the specified application gateway. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. -// -// resourceGroupName is the name of the resource group. applicationGatewayName -// is the name of the application gateway. -func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.StartPreparer(resourceGroupName, applicationGatewayName, cancel) +// ListAvailableWafRuleSets lists all available web application firewall rule +// sets. 
+func (client ApplicationGatewaysClient) ListAvailableWafRuleSets() (result ApplicationGatewayAvailableWafRuleSetsResult, err error) { + req, err := client.ListAvailableWafRuleSetsPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableWafRuleSets", nil, "Failure preparing request") + return } - resp, err := client.StartSender(req) + resp, err := client.ListAvailableWafRuleSetsSender(req) if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure sending request") + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableWafRuleSets", resp, "Failure sending request") + return } - result, err = client.StartResponder(resp) + result, err = client.ListAvailableWafRuleSetsResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAvailableWafRuleSets", resp, "Failure responding to request") + } + + return +} + +// ListAvailableWafRuleSetsPreparer prepares the ListAvailableWafRuleSets request. 
+func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, } + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGatewayAvailableWafRuleSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAvailableWafRuleSetsSender sends the ListAvailableWafRuleSets request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAvailableWafRuleSetsResponder handles the response to the ListAvailableWafRuleSets request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsResponder(resp *http.Response) (result ApplicationGatewayAvailableWafRuleSetsResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} return } +// Start starts the specified application gateway. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. 
+func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StartPreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request") + return + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure sending request") + return + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + // StartPreparer prepares the Start request. func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { pathParameters := map[string]interface{}{ @@ -529,8 +658,9 @@ func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -563,29 +693,42 @@ func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (res // Stop stops the specified application gateway in a resource group. This // method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. 
The channel will be used to cancel polling and -// any outstanding HTTP requests. +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. applicationGatewayName // is the name of the application gateway. -func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.StopPreparer(resourceGroupName, applicationGatewayName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request") - } - - resp, err := client.StopSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure sending request") - } - - result, err = client.StopResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure responding to request") - } - - return +func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StopPreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request") + return + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure sending request") + return + } + 
+ result, err = client.StopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // StopPreparer prepares the Stop request. @@ -596,8 +739,9 @@ func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, a "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go new file mode 100755 index 000000000000..5024f5164965 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go @@ -0,0 +1,127 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// BgpServiceCommunitiesClient is the composite Swagger for Network Client +type BgpServiceCommunitiesClient struct { + ManagementClient +} + +// NewBgpServiceCommunitiesClient creates an instance of the +// BgpServiceCommunitiesClient client. +func NewBgpServiceCommunitiesClient(subscriptionID string) BgpServiceCommunitiesClient { + return NewBgpServiceCommunitiesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBgpServiceCommunitiesClientWithBaseURI creates an instance of the +// BgpServiceCommunitiesClient client. +func NewBgpServiceCommunitiesClientWithBaseURI(baseURI string, subscriptionID string) BgpServiceCommunitiesClient { + return BgpServiceCommunitiesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets all the available bgp service communities. +func (client BgpServiceCommunitiesClient) List() (result BgpServiceCommunityListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client BgpServiceCommunitiesClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/bgpServiceCommunities", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BgpServiceCommunitiesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client BgpServiceCommunitiesClient) ListResponder(resp *http.Response) (result BgpServiceCommunityListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client BgpServiceCommunitiesClient) ListNextResults(lastResults BgpServiceCommunityListResult) (result BgpServiceCommunityListResult, err error) { + req, err := lastResults.BgpServiceCommunityListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BgpServiceCommunitiesClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go old mode 100644 new mode 100755 index 74e3d7e491b7..ec5bf7ced542 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -1,10 +1,6 @@ -// Package network implements the Azure ARM Network service API version -// 2016-09-01. +// Package network implements the Azure ARM Network service API version . // -// The Microsoft Azure Network management API provides a RESTful set of web -// services that interact with Microsoft Azure Networks service to manage -// your network resources. The API has entities that capture the relationship -// between an end user and the Microsoft Azure Networks service. +// Composite Swagger for Network Client package network // Copyright (c) Microsoft and contributors. All rights reserved. @@ -21,7 +17,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -32,9 +28,6 @@ import ( ) const ( - // APIVersion is the version of the Network - APIVersion = "2016-09-01" - // DefaultBaseURI is the default URI used for the service Network DefaultBaseURI = "https://management.azure.com" ) @@ -43,7 +36,6 @@ const ( type ManagementClient struct { autorest.Client BaseURI string - APIVersion string SubscriptionID string } @@ -57,7 +49,6 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, - APIVersion: APIVersion, SubscriptionID: subscriptionID, } } @@ -71,13 +62,15 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) { req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", nil, "Failure preparing request") + return } resp, err := client.CheckDNSNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure sending request") + return } result, err = client.CheckDNSNameAvailabilityResponder(resp) @@ -95,8 +88,9 @@ func (client ManagementClient) 
CheckDNSNameAvailabilityPreparer(location string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(domainNameLabel) > 0 { queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go old mode 100644 new mode 100755 index eb0a2a075c2c..feb974fb97ca --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,8 @@ import ( "net/http" ) -// ExpressRouteCircuitAuthorizationsClient is the the Microsoft Azure Network -// management API provides a RESTful set of web services that interact with -// Microsoft Azure Networks service to manage your network resources. The API -// has entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. 
+// ExpressRouteCircuitAuthorizationsClient is the composite Swagger for Network +// Client type ExpressRouteCircuitAuthorizationsClient struct { ManagementClient } @@ -39,39 +36,52 @@ func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRo return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance -// of the ExpressRouteCircuitAuthorizationsClient client. +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance of +// the ExpressRouteCircuitAuthorizationsClient client. func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient { return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate creates or updates an authorization in the specified express -// route circuit. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// route circuit. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. authorizationName is the name of the -// authorization. authorizationParameters is parameters supplied to the -// create or update express route circuit authorization operation. 
-func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. authorizationName is the name of the +// authorization. authorizationParameters is parameters supplied to the create +// or update express route circuit authorization operation. 
+func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (<-chan ExpressRouteCircuitAuthorization, <-chan error) { + resultChan := make(chan ExpressRouteCircuitAuthorization, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuitAuthorization + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -83,8 +93,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(res "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -107,13 +118,14 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req * // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. 
The method always // closes the http.Response Body. -func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -122,27 +134,40 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(re // passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. authorizationName is the name of the +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. authorizationName is the name of the // authorization. 
-func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -154,8 +179,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGro "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -189,19 +215,21 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http // Get gets the specified authorization from the specified express route // circuit. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. authorizationName is the name of the +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. authorizationName is the name of the // authorization. 
func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -221,8 +249,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -254,18 +283,20 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Re // List gets all authorizations in an express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the circuit. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the circuit. 
func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -284,8 +315,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go old mode 100644 new mode 100755 index a459574b89b7..28b926f24438 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,8 @@ import ( "net/http" ) -// ExpressRouteCircuitPeeringsClient is the the Microsoft Azure Network -// management API provides a RESTful set of web services that interact with -// Microsoft Azure Networks service to manage your network resources. The API -// has entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. +// ExpressRouteCircuitPeeringsClient is the composite Swagger for Network +// Client type ExpressRouteCircuitPeeringsClient struct { ManagementClient } @@ -50,28 +47,41 @@ func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptio // passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. // peeringParameters is parameters supplied to the create or update express // route circuit peering operation. 
-func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (<-chan ExpressRouteCircuitPeering, <-chan error) { + resultChan := make(chan ExpressRouteCircuitPeering, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuitPeering + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", 
"CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -83,8 +93,9 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -107,13 +118,14 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.R // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -122,26 +134,39 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *ht // passing the cancel channel argument. The channel will be used to cancel // polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. 
-func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. 
+func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -153,8 +178,9 @@ func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -188,18 +214,20 @@ func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Respo // Get gets the specified authorization from the specified express route // circuit. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. 
circuitName is the name +// of the express route circuit. peeringName is the name of the peering. func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -219,8 +247,9 @@ func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -252,18 +281,20 @@ func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response // List gets all peerings in a specified express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. 
func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, err error) { req, err := client.ListPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -282,8 +313,9 @@ func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go old mode 100644 new mode 100755 index 6572e92b625f..b60aa10a4e94 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,7 @@ import ( "net/http" ) -// ExpressRouteCircuitsClient is the the Microsoft Azure Network management -// API provides a RESTful set of web services that interact with Microsoft -// Azure Networks service to manage your network resources. The API has -// entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. +// ExpressRouteCircuitsClient is the composite Swagger for Network Client type ExpressRouteCircuitsClient struct { ManagementClient } @@ -50,27 +46,40 @@ func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID str // argument. The channel will be used to cancel polling and any outstanding // HTTP requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the circuit. parameters is parameters supplied to the create or -// update express route circuit operation. 
-func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. circuitName is the name +// of the circuit. parameters is parameters supplied to the create or update +// express route circuit operation. 
+func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (<-chan ExpressRouteCircuit, <-chan error) { + resultChan := make(chan ExpressRouteCircuit, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuit + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -81,8 +90,9 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -105,41 +115,55 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Delete deletes the specified express route circuit. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified express route circuit. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. 
-func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, circuitName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. +func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, circuitName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure responding to request") + } + }() + return 
resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -150,8 +174,9 @@ func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -184,18 +209,20 @@ func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (r // Get gets information about the specified express route circuit. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name +// of express route circuit. func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) { req, err := client.GetPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -214,8 +241,9 @@ func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, c "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } 
preparer := autorest.CreatePreparer( @@ -248,18 +276,20 @@ func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (resu // GetPeeringStats gets all stats from an express route circuit in a resource // group. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. func (client ExpressRouteCircuitsClient) GetPeeringStats(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) { req, err := client.GetPeeringStatsPreparer(resourceGroupName, circuitName, peeringName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request") + return } resp, err := client.GetPeeringStatsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request") + return } result, err = client.GetPeeringStatsResponder(resp) @@ -279,8 +309,9 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -313,18 +344,20 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Res // 
GetStats gets all the stats from an express route circuit in a resource // group. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. func (client ExpressRouteCircuitsClient) GetStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) { req, err := client.GetStatsPreparer(resourceGroupName, circuitName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request") + return } resp, err := client.GetStatsSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request") + return } result, err = client.GetStatsResponder(resp) @@ -343,8 +376,9 @@ func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName stri "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -380,13 +414,15 @@ func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing 
request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -404,8 +440,9 @@ func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -463,13 +500,15 @@ func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRout func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -486,8 +525,9 @@ func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error "subscriptionId": autorest.Encode("path", client.SubscriptionID), 
} + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -543,31 +583,44 @@ func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressR // ListArpTable gets the currently advertised ARP table associated with the // express route circuit in a resource group. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. // devicePath is the path of the device. 
-func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ListArpTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request") - } - - resp, err := client.ListArpTableSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request") - } - - result, err = client.ListArpTableResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request") - } - - return +func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsArpTableListResult, <-chan error) { + resultChan := make(chan ExpressRouteCircuitsArpTableListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuitsArpTableListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ListArpTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request") + return + } + + resp, err := client.ListArpTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request") + return + } + + result, err = 
client.ListArpTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ListArpTablePreparer prepares the ListArpTable request. @@ -580,8 +633,9 @@ func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -602,43 +656,57 @@ func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) ( // ListArpTableResponder handles the response to the ListArpTable request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // ListRoutesTable gets the currently advertised routes table associated with // the express route circuit in a resource group. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. 
circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. // devicePath is the path of the device. -func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request") - } - - resp, err := client.ListRoutesTableSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request") - } - - result, err = client.ListRoutesTableResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request") - } - - return +func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsRoutesTableListResult, <-chan error) { + resultChan := make(chan ExpressRouteCircuitsRoutesTableListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuitsRoutesTableListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure 
preparing request") + return + } + + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request") + return + } + + result, err = client.ListRoutesTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ListRoutesTablePreparer prepares the ListRoutesTable request. @@ -651,8 +719,9 @@ func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -673,13 +742,14 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request // ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always // closes the http.Response Body. -func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -689,27 +759,40 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Res // channel argument. The channel will be used to cancel polling and any // outstanding HTTP requests. 
// -// resourceGroupName is the name of the resource group. circuitName is the -// name of the express route circuit. peeringName is the name of the peering. +// resourceGroupName is the name of the resource group. circuitName is the name +// of the express route circuit. peeringName is the name of the peering. // devicePath is the path of the device. -func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ListRoutesTableSummaryPreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request") - } - - resp, err := client.ListRoutesTableSummarySender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure sending request") - } - - result, err = client.ListRoutesTableSummaryResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure responding to request") - } - - return +func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (<-chan ExpressRouteCircuitsRoutesTableSummaryListResult, <-chan error) { + resultChan := make(chan ExpressRouteCircuitsRoutesTableSummaryListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result ExpressRouteCircuitsRoutesTableSummaryListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ListRoutesTableSummaryPreparer(resourceGroupName, circuitName, peeringName, 
devicePath, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request") + return + } + + resp, err := client.ListRoutesTableSummarySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure sending request") + return + } + + result, err = client.ListRoutesTableSummaryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ListRoutesTableSummaryPreparer prepares the ListRoutesTableSummary request. @@ -722,8 +805,9 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -744,12 +828,13 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http. // ListRoutesTableSummaryResponder handles the response to the ListRoutesTableSummary request. The method always // closes the http.Response Body. 
-func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result autorest.Response, err error) { +func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableSummaryListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go old mode 100644 new mode 100755 index 9d0450ecf093..94db9a4f6147 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,8 @@ import ( "net/http" ) -// ExpressRouteServiceProvidersClient is the the Microsoft Azure Network -// management API provides a RESTful set of web services that interact with -// Microsoft Azure Networks service to manage your network resources. The API -// has entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. 
+// ExpressRouteServiceProvidersClient is the composite Swagger for Network +// Client type ExpressRouteServiceProvidersClient struct { ManagementClient } @@ -49,13 +46,15 @@ func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscripti func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -72,8 +71,9 @@ func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go old mode 100644 new mode 100755 index 9fbf7cddd4a5..8918c08b4b70 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -14,22 +14,17 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) -// InterfacesClient is the the Microsoft Azure Network management API provides -// a RESTful set of web services that interact with Microsoft Azure Networks -// service to manage your network resources. The API has entities that -// capture the relationship between an end user and the Microsoft Azure -// Networks service. +// InterfacesClient is the composite Swagger for Network Client type InterfacesClient struct { ManagementClient } @@ -50,40 +45,40 @@ func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) Inter // argument. The channel will be used to cancel polling and any outstanding // HTTP requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName -// is the name of the network interface. parameters is parameters supplied to -// the create or update network interface operation. 
-func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.InterfacesClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. 
networkInterfaceName is +// the name of the network interface. parameters is parameters supplied to the +// create or update network interface operation. +func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (<-chan Interface, <-chan error) { + resultChan := make(chan Interface, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Interface + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -94,8 +89,9 @@ func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -118,41 +114,55 @@ func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Re // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified network interface. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName -// is the name of the network interface. -func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. networkInterfaceName is +// the name of the network interface. 
+func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -163,8 +173,9 @@ func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkI "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -197,19 +208,20 @@ func (client InterfacesClient) DeleteResponder(resp *http.Response) (result auto // Get gets information about the specified network interface. // -// resourceGroupName is the name of the resource group. networkInterfaceName -// is the name of the network interface. expand is expands referenced -// resources. +// resourceGroupName is the name of the resource group. networkInterfaceName is +// the name of the network interface. expand is expands referenced resources. 
func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) { req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -228,8 +240,9 @@ func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInte "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -262,31 +275,44 @@ func (client InterfacesClient) GetResponder(resp *http.Response) (result Interfa return } -// GetEffectiveRouteTable gets all route tables applied to a network -// interface. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// GetEffectiveRouteTable gets all route tables applied to a network interface. +// This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. 
networkInterfaceName -// is the name of the network interface. -func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request") - } - - resp, err := client.GetEffectiveRouteTableSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request") - } - - result, err = client.GetEffectiveRouteTableResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. networkInterfaceName is +// the name of the network interface. 
+func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveRouteListResult, <-chan error) { + resultChan := make(chan EffectiveRouteListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result EffectiveRouteListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request") + return + } + + resp, err := client.GetEffectiveRouteTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request") + return + } + + result, err = client.GetEffectiveRouteTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // GetEffectiveRouteTablePreparer prepares the GetEffectiveRouteTable request. @@ -297,8 +323,9 @@ func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -319,13 +346,14 @@ func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) ( // GetEffectiveRouteTableResponder handles the response to the GetEffectiveRouteTable request. The method always // closes the http.Response Body. 
-func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result autorest.Response, err error) { +func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result EffectiveRouteListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -339,13 +367,15 @@ func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Respon func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) { req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request") + return } resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request") + return } result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp) @@ -366,8 +396,9 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer 
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } + const APIVersion = "2016-09-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -406,13 +437,15 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponde func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -430,8 +463,9 @@ func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Req "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -489,13 +523,15 @@ func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", 
nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -512,8 +548,9 @@ func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -568,30 +605,43 @@ func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResul } // ListEffectiveNetworkSecurityGroups gets all network security groups applied -// to a network interface. This method may poll for completion. Polling can -// be canceled by passing the cancel channel argument. The channel will be -// used to cancel polling and any outstanding HTTP requests. +// to a network interface. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. networkInterfaceName -// is the name of the network interface. 
-func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request") - } - - resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request") - } - - result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. networkInterfaceName is +// the name of the network interface. 
+func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveNetworkSecurityGroupListResult, <-chan error) { + resultChan := make(chan EffectiveNetworkSecurityGroupListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result EffectiveNetworkSecurityGroupListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request") + return + } + + resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request") + return + } + + result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request. 
@@ -602,8 +652,9 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -624,31 +675,34 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *htt // ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always // closes the http.Response Body. -func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result autorest.Response, err error) { +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result EffectiveNetworkSecurityGroupListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in -// a virtual machine scale set. +// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a +// virtual machine scale set. // // resourceGroupName is the name of the resource group. // virtualMachineScaleSetName is the name of the virtual machine scale set. 
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request") + return } resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request") + return } result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) @@ -667,8 +721,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPrepar "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } + const APIVersion = "2016-09-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -731,13 +786,15 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextRe func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) { req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) if err != 
nil { - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request") + return } resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request") + return } result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) @@ -757,8 +814,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPrep "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } + const APIVersion = "2016-09-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go old mode 100644 new mode 100755 index 30012ff35b90..11d649b270a0 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -24,11 +24,7 @@ import ( "net/http" ) -// LoadBalancersClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// LoadBalancersClient is the composite Swagger for Network Client type LoadBalancersClient struct { ManagementClient } @@ -46,31 +42,44 @@ func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) Lo } // CreateOrUpdate creates or updates a load balancer. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. loadBalancerName is -// the name of the load balancer. parameters is parameters supplied to the -// create or update load balancer operation. 
-func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. loadBalancerName is the +// name of the load balancer. parameters is parameters supplied to the create +// or update load balancer operation. 
+func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (<-chan LoadBalancer, <-chan error) { + resultChan := make(chan LoadBalancer, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result LoadBalancer + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -81,8 +90,9 @@ func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -105,41 +115,55 @@ func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancer, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified load balancer. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. loadBalancerName is -// the name of the load balancer. -func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure responding to request") - } - - return +// resourceGroupName is the name of the resource group. loadBalancerName is the +// name of the load balancer. 
+func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -150,8 +174,9 @@ func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadB "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -184,18 +209,20 @@ func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result a // Get gets the specified load balancer. // -// resourceGroupName is the name of the resource group. loadBalancerName is -// the name of the load balancer. expand is expands referenced resources. +// resourceGroupName is the name of the resource group. loadBalancerName is the +// name of the load balancer. expand is expands referenced resources. 
func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) { req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -214,8 +241,9 @@ func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBala "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -254,13 +282,15 @@ func (client LoadBalancersClient) GetResponder(resp *http.Response) (result Load func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending 
request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -278,8 +308,9 @@ func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http. "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -337,13 +368,15 @@ func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListRe func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -360,8 +393,9 @@ func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go old mode 100644 new mode 100755 index 
fd48306f245d..c1e66076e198 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// LocalNetworkGatewaysClient is the the Microsoft Azure Network management -// API provides a RESTful set of web services that interact with Microsoft -// Azure Networks service to manage your network resources. The API has -// entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. +// LocalNetworkGatewaysClient is the composite Swagger for Network Client type LocalNetworkGatewaysClient struct { ManagementClient } @@ -47,41 +43,55 @@ func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID str } // CreateOrUpdate creates or updates a local network gateway in the specified -// resource group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// resource group. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // -// resourceGroupName is the name of the resource group. -// localNetworkGatewayName is the name of the local network gateway. -// parameters is parameters supplied to the create or update local network -// gateway operation. 
-func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { +// resourceGroupName is the name of the resource group. localNetworkGatewayName +// is the name of the local network gateway. parameters is parameters supplied +// to the create or update local network gateway operation. +func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (<-chan LocalNetworkGateway, <-chan error) { + resultChan := make(chan LocalNetworkGateway, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ + {TargetValue: localNetworkGatewayName, + Constraints: []validation.Constraint{{Target: "localNetworkGatewayName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LocalNetworkGatewayPropertiesFormat.LocalNetworkAddressSpace", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.LocalNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, 
"network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + Constraints: []validation.Constraint{{Target: "parameters.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan } - return + go func() { + var err error + var result LocalNetworkGateway + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
@@ -92,8 +102,9 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -116,41 +127,64 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result LocalNetworkGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Delete deletes the specified local network gateway. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified local network gateway. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // -// resourceGroupName is the name of the resource group. -// localNetworkGatewayName is the name of the local network gateway. 
-func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure responding to request") +// resourceGroupName is the name of the resource group. localNetworkGatewayName +// is the name of the local network gateway. +func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: localNetworkGatewayName, + Constraints: []validation.Constraint{{Target: "localNetworkGatewayName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.LocalNetworkGatewaysClient", "Delete") + close(errChan) + close(resultChan) + return resultChan, errChan } - return + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure 
preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -161,8 +195,9 @@ func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -195,18 +230,26 @@ func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (r // Get gets the specified local network gateway in a resource group. // -// resourceGroupName is the name of the resource group. -// localNetworkGatewayName is the name of the local network gateway. +// resourceGroupName is the name of the resource group. localNetworkGatewayName +// is the name of the local network gateway. 
func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: localNetworkGatewayName, + Constraints: []validation.Constraint{{Target: "localNetworkGatewayName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.LocalNetworkGatewaysClient", "Get") + } + req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -225,8 +268,9 @@ func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -262,13 +306,15 @@ func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (resu func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure 
preparing request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -286,8 +332,9 @@ func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go old mode 100644 new mode 100755 index bb514f796b31..505691db01b3 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go @@ -14,16 +14,27 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" "github.com/Azure/go-autorest/autorest/to" "net/http" ) +// Access enumerates the values for access. +type Access string + +const ( + // Allow specifies the allow state for access. + Allow Access = "Allow" + // Deny specifies the deny state for access. 
+ Deny Access = "Deny" +) + // ApplicationGatewayBackendHealthServerHealth enumerates the values for // application gateway backend health server health. type ApplicationGatewayBackendHealthServerHealth string @@ -32,6 +43,9 @@ const ( // Down specifies the down state for application gateway backend health // server health. Down ApplicationGatewayBackendHealthServerHealth = "Down" + // Draining specifies the draining state for application gateway backend + // health server health. + Draining ApplicationGatewayBackendHealthServerHealth = "Draining" // Partial specifies the partial state for application gateway backend // health server health. Partial ApplicationGatewayBackendHealthServerHealth = "Partial" @@ -51,18 +65,18 @@ const ( // Disabled specifies the disabled state for application gateway cookie // based affinity. Disabled ApplicationGatewayCookieBasedAffinity = "Disabled" - // Enabled specifies the enabled state for application gateway cookie - // based affinity. + // Enabled specifies the enabled state for application gateway cookie based + // affinity. Enabled ApplicationGatewayCookieBasedAffinity = "Enabled" ) -// ApplicationGatewayFirewallMode enumerates the values for application -// gateway firewall mode. +// ApplicationGatewayFirewallMode enumerates the values for application gateway +// firewall mode. type ApplicationGatewayFirewallMode string const ( - // Detection specifies the detection state for application gateway - // firewall mode. + // Detection specifies the detection state for application gateway firewall + // mode. Detection ApplicationGatewayFirewallMode = "Detection" // Prevention specifies the prevention state for application gateway // firewall mode. @@ -117,14 +131,14 @@ const ( type ApplicationGatewaySkuName string const ( - // StandardLarge specifies the standard large state for application - // gateway sku name. + // StandardLarge specifies the standard large state for application gateway + // sku name. 
StandardLarge ApplicationGatewaySkuName = "Standard_Large" // StandardMedium specifies the standard medium state for application // gateway sku name. StandardMedium ApplicationGatewaySkuName = "Standard_Medium" - // StandardSmall specifies the standard small state for application - // gateway sku name. + // StandardSmall specifies the standard small state for application gateway + // sku name. StandardSmall ApplicationGatewaySkuName = "Standard_Small" // WAFLarge specifies the waf large state for application gateway sku name. WAFLarge ApplicationGatewaySkuName = "WAF_Large" @@ -159,6 +173,16 @@ const ( WAF ApplicationGatewayTier = "WAF" ) +// AssociationType enumerates the values for association type. +type AssociationType string + +const ( + // Associated specifies the associated state for association type. + Associated AssociationType = "Associated" + // Contains specifies the contains state for association type. + Contains AssociationType = "Contains" +) + // AuthorizationUseStatus enumerates the values for authorization use status. type AuthorizationUseStatus string @@ -169,18 +193,71 @@ const ( InUse AuthorizationUseStatus = "InUse" ) +// BgpPeerState enumerates the values for bgp peer state. +type BgpPeerState string + +const ( + // BgpPeerStateConnected specifies the bgp peer state connected state for + // bgp peer state. + BgpPeerStateConnected BgpPeerState = "Connected" + // BgpPeerStateConnecting specifies the bgp peer state connecting state for + // bgp peer state. + BgpPeerStateConnecting BgpPeerState = "Connecting" + // BgpPeerStateIdle specifies the bgp peer state idle state for bgp peer + // state. + BgpPeerStateIdle BgpPeerState = "Idle" + // BgpPeerStateStopped specifies the bgp peer state stopped state for bgp + // peer state. + BgpPeerStateStopped BgpPeerState = "Stopped" + // BgpPeerStateUnknown specifies the bgp peer state unknown state for bgp + // peer state. 
+ BgpPeerStateUnknown BgpPeerState = "Unknown" +) + +// DhGroup enumerates the values for dh group. +type DhGroup string + +const ( + // DHGroup1 specifies the dh group 1 state for dh group. + DHGroup1 DhGroup = "DHGroup1" + // DHGroup14 specifies the dh group 14 state for dh group. + DHGroup14 DhGroup = "DHGroup14" + // DHGroup2 specifies the dh group 2 state for dh group. + DHGroup2 DhGroup = "DHGroup2" + // DHGroup2048 specifies the dh group 2048 state for dh group. + DHGroup2048 DhGroup = "DHGroup2048" + // DHGroup24 specifies the dh group 24 state for dh group. + DHGroup24 DhGroup = "DHGroup24" + // ECP256 specifies the ecp256 state for dh group. + ECP256 DhGroup = "ECP256" + // ECP384 specifies the ecp384 state for dh group. + ECP384 DhGroup = "ECP384" + // None specifies the none state for dh group. + None DhGroup = "None" +) + +// Direction enumerates the values for direction. +type Direction string + +const ( + // Inbound specifies the inbound state for direction. + Inbound Direction = "Inbound" + // Outbound specifies the outbound state for direction. + Outbound Direction = "Outbound" +) + // EffectiveRouteSource enumerates the values for effective route source. type EffectiveRouteSource string const ( - // EffectiveRouteSourceDefault specifies the effective route source - // default state for effective route source. + // EffectiveRouteSourceDefault specifies the effective route source default + // state for effective route source. EffectiveRouteSourceDefault EffectiveRouteSource = "Default" - // EffectiveRouteSourceUnknown specifies the effective route source - // unknown state for effective route source. - EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown" - // EffectiveRouteSourceUser specifies the effective route source user + // EffectiveRouteSourceUnknown specifies the effective route source unknown // state for effective route source. 
+ EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown" + // EffectiveRouteSourceUser specifies the effective route source user state + // for effective route source. EffectiveRouteSourceUser EffectiveRouteSource = "User" // EffectiveRouteSourceVirtualNetworkGateway specifies the effective route // source virtual network gateway state for effective route source. @@ -211,8 +288,8 @@ const ( // NotConfigured specifies the not configured state for express route // circuit peering advertised public prefix state. NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured" - // ValidationNeeded specifies the validation needed state for express - // route circuit peering advertised public prefix state. + // ValidationNeeded specifies the validation needed state for express route + // circuit peering advertised public prefix state. ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded" ) @@ -242,13 +319,13 @@ const ( // AzurePublicPeering specifies the azure public peering state for express // route circuit peering type. AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering" - // MicrosoftPeering specifies the microsoft peering state for express - // route circuit peering type. + // MicrosoftPeering specifies the microsoft peering state for express route + // circuit peering type. MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering" ) -// ExpressRouteCircuitSkuFamily enumerates the values for express route -// circuit sku family. +// ExpressRouteCircuitSkuFamily enumerates the values for express route circuit +// sku family. type ExpressRouteCircuitSkuFamily string const ( @@ -273,6 +350,36 @@ const ( ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard" ) +// IkeEncryption enumerates the values for ike encryption. +type IkeEncryption string + +const ( + // AES128 specifies the aes128 state for ike encryption. 
+ AES128 IkeEncryption = "AES128" + // AES192 specifies the aes192 state for ike encryption. + AES192 IkeEncryption = "AES192" + // AES256 specifies the aes256 state for ike encryption. + AES256 IkeEncryption = "AES256" + // DES specifies the des state for ike encryption. + DES IkeEncryption = "DES" + // DES3 specifies the des3 state for ike encryption. + DES3 IkeEncryption = "DES3" +) + +// IkeIntegrity enumerates the values for ike integrity. +type IkeIntegrity string + +const ( + // MD5 specifies the md5 state for ike integrity. + MD5 IkeIntegrity = "MD5" + // SHA1 specifies the sha1 state for ike integrity. + SHA1 IkeIntegrity = "SHA1" + // SHA256 specifies the sha256 state for ike integrity. + SHA256 IkeIntegrity = "SHA256" + // SHA384 specifies the sha384 state for ike integrity. + SHA384 IkeIntegrity = "SHA384" +) + // IPAllocationMethod enumerates the values for ip allocation method. type IPAllocationMethod string @@ -283,6 +390,63 @@ const ( Static IPAllocationMethod = "Static" ) +// IpsecEncryption enumerates the values for ipsec encryption. +type IpsecEncryption string + +const ( + // IpsecEncryptionAES128 specifies the ipsec encryption aes128 state for + // ipsec encryption. + IpsecEncryptionAES128 IpsecEncryption = "AES128" + // IpsecEncryptionAES192 specifies the ipsec encryption aes192 state for + // ipsec encryption. + IpsecEncryptionAES192 IpsecEncryption = "AES192" + // IpsecEncryptionAES256 specifies the ipsec encryption aes256 state for + // ipsec encryption. + IpsecEncryptionAES256 IpsecEncryption = "AES256" + // IpsecEncryptionDES specifies the ipsec encryption des state for ipsec + // encryption. + IpsecEncryptionDES IpsecEncryption = "DES" + // IpsecEncryptionDES3 specifies the ipsec encryption des3 state for ipsec + // encryption. + IpsecEncryptionDES3 IpsecEncryption = "DES3" + // IpsecEncryptionGCMAES128 specifies the ipsec encryption gcmaes128 state + // for ipsec encryption. 
+ IpsecEncryptionGCMAES128 IpsecEncryption = "GCMAES128" + // IpsecEncryptionGCMAES192 specifies the ipsec encryption gcmaes192 state + // for ipsec encryption. + IpsecEncryptionGCMAES192 IpsecEncryption = "GCMAES192" + // IpsecEncryptionGCMAES256 specifies the ipsec encryption gcmaes256 state + // for ipsec encryption. + IpsecEncryptionGCMAES256 IpsecEncryption = "GCMAES256" + // IpsecEncryptionNone specifies the ipsec encryption none state for ipsec + // encryption. + IpsecEncryptionNone IpsecEncryption = "None" +) + +// IpsecIntegrity enumerates the values for ipsec integrity. +type IpsecIntegrity string + +const ( + // IpsecIntegrityGCMAES128 specifies the ipsec integrity gcmaes128 state + // for ipsec integrity. + IpsecIntegrityGCMAES128 IpsecIntegrity = "GCMAES128" + // IpsecIntegrityGCMAES192 specifies the ipsec integrity gcmaes192 state + // for ipsec integrity. + IpsecIntegrityGCMAES192 IpsecIntegrity = "GCMAES192" + // IpsecIntegrityGCMAES256 specifies the ipsec integrity gcmaes256 state + // for ipsec integrity. + IpsecIntegrityGCMAES256 IpsecIntegrity = "GCMAES256" + // IpsecIntegrityMD5 specifies the ipsec integrity md5 state for ipsec + // integrity. + IpsecIntegrityMD5 IpsecIntegrity = "MD5" + // IpsecIntegritySHA1 specifies the ipsec integrity sha1 state for ipsec + // integrity. + IpsecIntegritySHA1 IpsecIntegrity = "SHA1" + // IpsecIntegritySHA256 specifies the ipsec integrity sha256 state for + // ipsec integrity. + IpsecIntegritySHA256 IpsecIntegrity = "SHA256" +) + // IPVersion enumerates the values for ip version. type IPVersion string @@ -306,6 +470,30 @@ const ( SourceIPProtocol LoadDistribution = "SourceIPProtocol" ) +// NextHopType enumerates the values for next hop type. +type NextHopType string + +const ( + // NextHopTypeHyperNetGateway specifies the next hop type hyper net gateway + // state for next hop type. 
+ NextHopTypeHyperNetGateway NextHopType = "HyperNetGateway" + // NextHopTypeInternet specifies the next hop type internet state for next + // hop type. + NextHopTypeInternet NextHopType = "Internet" + // NextHopTypeNone specifies the next hop type none state for next hop + // type. + NextHopTypeNone NextHopType = "None" + // NextHopTypeVirtualAppliance specifies the next hop type virtual + // appliance state for next hop type. + NextHopTypeVirtualAppliance NextHopType = "VirtualAppliance" + // NextHopTypeVirtualNetworkGateway specifies the next hop type virtual + // network gateway state for next hop type. + NextHopTypeVirtualNetworkGateway NextHopType = "VirtualNetworkGateway" + // NextHopTypeVnetLocal specifies the next hop type vnet local state for + // next hop type. + NextHopTypeVnetLocal NextHopType = "VnetLocal" +) + // OperationStatus enumerates the values for operation status. type OperationStatus string @@ -318,6 +506,71 @@ const ( Succeeded OperationStatus = "Succeeded" ) +// PcError enumerates the values for pc error. +type PcError string + +const ( + // AgentStopped specifies the agent stopped state for pc error. + AgentStopped PcError = "AgentStopped" + // CaptureFailed specifies the capture failed state for pc error. + CaptureFailed PcError = "CaptureFailed" + // InternalError specifies the internal error state for pc error. + InternalError PcError = "InternalError" + // LocalFileFailed specifies the local file failed state for pc error. + LocalFileFailed PcError = "LocalFileFailed" + // StorageFailed specifies the storage failed state for pc error. + StorageFailed PcError = "StorageFailed" +) + +// PcProtocol enumerates the values for pc protocol. +type PcProtocol string + +const ( + // Any specifies the any state for pc protocol. + Any PcProtocol = "Any" + // TCP specifies the tcp state for pc protocol. + TCP PcProtocol = "TCP" + // UDP specifies the udp state for pc protocol. 
+ UDP PcProtocol = "UDP" +) + +// PcStatus enumerates the values for pc status. +type PcStatus string + +const ( + // PcStatusError specifies the pc status error state for pc status. + PcStatusError PcStatus = "Error" + // PcStatusNotStarted specifies the pc status not started state for pc + // status. + PcStatusNotStarted PcStatus = "NotStarted" + // PcStatusRunning specifies the pc status running state for pc status. + PcStatusRunning PcStatus = "Running" + // PcStatusStopped specifies the pc status stopped state for pc status. + PcStatusStopped PcStatus = "Stopped" + // PcStatusUnknown specifies the pc status unknown state for pc status. + PcStatusUnknown PcStatus = "Unknown" +) + +// PfsGroup enumerates the values for pfs group. +type PfsGroup string + +const ( + // PfsGroupECP256 specifies the pfs group ecp256 state for pfs group. + PfsGroupECP256 PfsGroup = "ECP256" + // PfsGroupECP384 specifies the pfs group ecp384 state for pfs group. + PfsGroupECP384 PfsGroup = "ECP384" + // PfsGroupNone specifies the pfs group none state for pfs group. + PfsGroupNone PfsGroup = "None" + // PfsGroupPFS1 specifies the pfs group pfs1 state for pfs group. + PfsGroupPFS1 PfsGroup = "PFS1" + // PfsGroupPFS2 specifies the pfs group pfs2 state for pfs group. + PfsGroupPFS2 PfsGroup = "PFS2" + // PfsGroupPFS2048 specifies the pfs group pfs2048 state for pfs group. + PfsGroupPFS2048 PfsGroup = "PFS2048" + // PfsGroupPFS24 specifies the pfs group pfs24 state for pfs group. + PfsGroupPFS24 PfsGroup = "PFS24" +) + // ProbeProtocol enumerates the values for probe protocol. type ProbeProtocol string @@ -340,6 +593,34 @@ const ( X86 ProcessorArchitecture = "X86" ) +// Protocol enumerates the values for protocol. +type Protocol string + +const ( + // ProtocolTCP specifies the protocol tcp state for protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP specifies the protocol udp state for protocol. 
+ ProtocolUDP Protocol = "UDP" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateDeleting specifies the provisioning state deleting + // state for provisioning state. + ProvisioningStateDeleting ProvisioningState = "Deleting" + // ProvisioningStateFailed specifies the provisioning state failed state + // for provisioning state. + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateSucceeded specifies the provisioning state succeeded + // state for provisioning state. + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + // ProvisioningStateUpdating specifies the provisioning state updating + // state for provisioning state. + ProvisioningStateUpdating ProvisioningState = "Updating" +) + // RouteNextHopType enumerates the values for route next hop type. type RouteNextHopType string @@ -365,32 +646,39 @@ const ( type SecurityRuleAccess string const ( - // Allow specifies the allow state for security rule access. - Allow SecurityRuleAccess = "Allow" - // Deny specifies the deny state for security rule access. - Deny SecurityRuleAccess = "Deny" + // SecurityRuleAccessAllow specifies the security rule access allow state + // for security rule access. + SecurityRuleAccessAllow SecurityRuleAccess = "Allow" + // SecurityRuleAccessDeny specifies the security rule access deny state for + // security rule access. + SecurityRuleAccessDeny SecurityRuleAccess = "Deny" ) // SecurityRuleDirection enumerates the values for security rule direction. type SecurityRuleDirection string const ( - // Inbound specifies the inbound state for security rule direction. - Inbound SecurityRuleDirection = "Inbound" - // Outbound specifies the outbound state for security rule direction. - Outbound SecurityRuleDirection = "Outbound" + // SecurityRuleDirectionInbound specifies the security rule direction + // inbound state for security rule direction. 
+ SecurityRuleDirectionInbound SecurityRuleDirection = "Inbound" + // SecurityRuleDirectionOutbound specifies the security rule direction + // outbound state for security rule direction. + SecurityRuleDirectionOutbound SecurityRuleDirection = "Outbound" ) // SecurityRuleProtocol enumerates the values for security rule protocol. type SecurityRuleProtocol string const ( - // Asterisk specifies the asterisk state for security rule protocol. - Asterisk SecurityRuleProtocol = "*" - // TCP specifies the tcp state for security rule protocol. - TCP SecurityRuleProtocol = "Tcp" - // UDP specifies the udp state for security rule protocol. - UDP SecurityRuleProtocol = "Udp" + // SecurityRuleProtocolAsterisk specifies the security rule protocol + // asterisk state for security rule protocol. + SecurityRuleProtocolAsterisk SecurityRuleProtocol = "*" + // SecurityRuleProtocolTCP specifies the security rule protocol tcp state + // for security rule protocol. + SecurityRuleProtocolTCP SecurityRuleProtocol = "Tcp" + // SecurityRuleProtocolUDP specifies the security rule protocol udp state + // for security rule protocol. + SecurityRuleProtocolUDP SecurityRuleProtocol = "Udp" ) // ServiceProviderProvisioningState enumerates the values for service provider @@ -485,6 +773,15 @@ const ( // network gateway sku name ultra performance state for virtual network // gateway sku name. VirtualNetworkGatewaySkuNameUltraPerformance VirtualNetworkGatewaySkuName = "UltraPerformance" + // VirtualNetworkGatewaySkuNameVpnGw1 specifies the virtual network gateway + // sku name vpn gw 1 state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameVpnGw1 VirtualNetworkGatewaySkuName = "VpnGw1" + // VirtualNetworkGatewaySkuNameVpnGw2 specifies the virtual network gateway + // sku name vpn gw 2 state for virtual network gateway sku name. 
+ VirtualNetworkGatewaySkuNameVpnGw2 VirtualNetworkGatewaySkuName = "VpnGw2" + // VirtualNetworkGatewaySkuNameVpnGw3 specifies the virtual network gateway + // sku name vpn gw 3 state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameVpnGw3 VirtualNetworkGatewaySkuName = "VpnGw3" ) // VirtualNetworkGatewaySkuTier enumerates the values for virtual network @@ -506,6 +803,15 @@ const ( // network gateway sku tier ultra performance state for virtual network // gateway sku tier. VirtualNetworkGatewaySkuTierUltraPerformance VirtualNetworkGatewaySkuTier = "UltraPerformance" + // VirtualNetworkGatewaySkuTierVpnGw1 specifies the virtual network gateway + // sku tier vpn gw 1 state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierVpnGw1 VirtualNetworkGatewaySkuTier = "VpnGw1" + // VirtualNetworkGatewaySkuTierVpnGw2 specifies the virtual network gateway + // sku tier vpn gw 2 state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierVpnGw2 VirtualNetworkGatewaySkuTier = "VpnGw2" + // VirtualNetworkGatewaySkuTierVpnGw3 specifies the virtual network gateway + // sku tier vpn gw 3 state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierVpnGw3 VirtualNetworkGatewaySkuTier = "VpnGw3" ) // VirtualNetworkGatewayType enumerates the values for virtual network gateway @@ -521,8 +827,8 @@ const ( VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" ) -// VirtualNetworkPeeringState enumerates the values for virtual network -// peering state. +// VirtualNetworkPeeringState enumerates the values for virtual network peering +// state. type VirtualNetworkPeeringState string const ( @@ -547,8 +853,8 @@ const ( RouteBased VpnType = "RouteBased" ) -// AddressSpace is addressSpace contains an array of IP address ranges that -// can be used by subnets of the virtual network. +// AddressSpace is addressSpace contains an array of IP address ranges that can +// be used by subnets of the virtual network. 
type AddressSpace struct { AddressPrefixes *[]string `json:"addressPrefixes,omitempty"` } @@ -581,6 +887,13 @@ type ApplicationGatewayAuthenticationCertificatePropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// ApplicationGatewayAvailableWafRuleSetsResult is response for +// ApplicationGatewayAvailableWafRuleSets API service call. +type ApplicationGatewayAvailableWafRuleSetsResult struct { + autorest.Response `json:"-"` + Value *[]ApplicationGatewayFirewallRuleSet `json:"value,omitempty"` +} + // ApplicationGatewayBackendAddress is backend address of an application // gateway. type ApplicationGatewayBackendAddress struct { @@ -634,8 +947,8 @@ type ApplicationGatewayBackendHealthServer struct { Health ApplicationGatewayBackendHealthServerHealth `json:"health,omitempty"` } -// ApplicationGatewayBackendHTTPSettings is backend address pool settings of -// an application gateway. +// ApplicationGatewayBackendHTTPSettings is backend address pool settings of an +// application gateway. type ApplicationGatewayBackendHTTPSettings struct { ID *string `json:"id,omitempty"` *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"` @@ -653,10 +966,59 @@ type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { Probe *SubResource `json:"probe,omitempty"` AuthenticationCertificates *[]SubResource `json:"authenticationCertificates,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + ConnectionDraining *ApplicationGatewayConnectionDraining `json:"connectionDraining,omitempty"` +} + +// ApplicationGatewayConnectionDraining is connection draining allows open +// connections to a backend server to be active for a specified time after the +// backend server got removed from the configuration. 
+type ApplicationGatewayConnectionDraining struct { + Enabled *bool `json:"enabled,omitempty"` + DrainTimeoutInSec *int32 `json:"drainTimeoutInSec,omitempty"` +} + +// ApplicationGatewayFirewallDisabledRuleGroup is allows to disable rules +// within a rule group or an entire rule group. +type ApplicationGatewayFirewallDisabledRuleGroup struct { + RuleGroupName *string `json:"ruleGroupName,omitempty"` + Rules *[]int32 `json:"rules,omitempty"` } -// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of -// an application gateway. +// ApplicationGatewayFirewallRule is a web application firewall rule. +type ApplicationGatewayFirewallRule struct { + RuleID *int32 `json:"ruleId,omitempty"` + Description *string `json:"description,omitempty"` +} + +// ApplicationGatewayFirewallRuleGroup is a web application firewall rule +// group. +type ApplicationGatewayFirewallRuleGroup struct { + RuleGroupName *string `json:"ruleGroupName,omitempty"` + Description *string `json:"description,omitempty"` + Rules *[]ApplicationGatewayFirewallRule `json:"rules,omitempty"` +} + +// ApplicationGatewayFirewallRuleSet is a web application firewall rule set. +type ApplicationGatewayFirewallRuleSet struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ApplicationGatewayFirewallRuleSetPropertiesFormat `json:"properties,omitempty"` +} + +// ApplicationGatewayFirewallRuleSetPropertiesFormat is properties of the web +// application firewall rule set. 
+type ApplicationGatewayFirewallRuleSetPropertiesFormat struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + RuleSetType *string `json:"ruleSetType,omitempty"` + RuleSetVersion *string `json:"ruleSetVersion,omitempty"` + RuleGroups *[]ApplicationGatewayFirewallRuleGroup `json:"ruleGroups,omitempty"` +} + +// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of an +// application gateway. type ApplicationGatewayFrontendIPConfiguration struct { ID *string `json:"id,omitempty"` *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` @@ -872,11 +1234,14 @@ type ApplicationGatewayURLPathMapPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayWebApplicationFirewallConfiguration is application -// gateway web application firewall configuration. +// ApplicationGatewayWebApplicationFirewallConfiguration is application gateway +// web application firewall configuration. type ApplicationGatewayWebApplicationFirewallConfiguration struct { - Enabled *bool `json:"enabled,omitempty"` - FirewallMode ApplicationGatewayFirewallMode `json:"firewallMode,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + FirewallMode ApplicationGatewayFirewallMode `json:"firewallMode,omitempty"` + RuleSetType *string `json:"ruleSetType,omitempty"` + RuleSetVersion *string `json:"ruleSetVersion,omitempty"` + DisabledRuleGroups *[]ApplicationGatewayFirewallDisabledRuleGroup `json:"disabledRuleGroups,omitempty"` } // AuthorizationListResult is response for ListAuthorizations API service call @@ -907,13 +1272,13 @@ type AuthorizationPropertiesFormat struct { } // AzureAsyncOperationResult is the response body contains the status of the -// specified asynchronous operation, indicating whether it has succeeded, is -// in progress, or has failed. Note that this status is distinct from the -// HTTP status code returned for the Get Operation Status operation itself. 
-// If the asynchronous operation succeeded, the response body includes the -// HTTP status code for the successful request. If the asynchronous operation -// failed, the response body includes the HTTP status code for the failed -// request and error information regarding the failure. +// specified asynchronous operation, indicating whether it has succeeded, is in +// progress, or has failed. Note that this status is distinct from the HTTP +// status code returned for the Get Operation Status operation itself. If the +// asynchronous operation succeeded, the response body includes the HTTP status +// code for the successful request. If the asynchronous operation failed, the +// response body includes the HTTP status code for the failed request and error +// information regarding the failure. type AzureAsyncOperationResult struct { Status OperationStatus `json:"status,omitempty"` Error *Error `json:"error,omitempty"` @@ -936,14 +1301,78 @@ type BackendAddressPoolPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// BgpSettings is +// BGPCommunity is contains bgp community information offered in Service +// Community resources. 
+type BGPCommunity struct { + ServiceSupportedRegion *string `json:"serviceSupportedRegion,omitempty"` + CommunityName *string `json:"communityName,omitempty"` + CommunityValue *string `json:"communityValue,omitempty"` + CommunityPrefixes *[]string `json:"communityPrefixes,omitempty"` +} + +// BgpPeerStatus is bGP peer status details +type BgpPeerStatus struct { + LocalAddress *string `json:"localAddress,omitempty"` + Neighbor *string `json:"neighbor,omitempty"` + Asn *int32 `json:"asn,omitempty"` + State BgpPeerState `json:"state,omitempty"` + ConnectedDuration *string `json:"connectedDuration,omitempty"` + RoutesReceived *int64 `json:"routesReceived,omitempty"` + MessagesSent *int64 `json:"messagesSent,omitempty"` + MessagesReceived *int64 `json:"messagesReceived,omitempty"` +} + +// BgpPeerStatusListResult is response for list BGP peer status API service +// call +type BgpPeerStatusListResult struct { + autorest.Response `json:"-"` + Value *[]BgpPeerStatus `json:"value,omitempty"` +} + +// BgpServiceCommunity is service Community Properties. +type BgpServiceCommunity struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *BgpServiceCommunityPropertiesFormat `json:"properties,omitempty"` +} + +// BgpServiceCommunityListResult is response for the ListServiceCommunity API +// service call. +type BgpServiceCommunityListResult struct { + autorest.Response `json:"-"` + Value *[]BgpServiceCommunity `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// BgpServiceCommunityListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client BgpServiceCommunityListResult) BgpServiceCommunityListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// BgpServiceCommunityPropertiesFormat is properties of Service Community. +type BgpServiceCommunityPropertiesFormat struct { + ServiceName *string `json:"serviceName,omitempty"` + BgpCommunities *[]BGPCommunity `json:"bgpCommunities,omitempty"` +} + +// BgpSettings is bGP settings details type BgpSettings struct { Asn *int64 `json:"asn,omitempty"` BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"` PeerWeight *int32 `json:"peerWeight,omitempty"` } -// ConnectionResetSharedKey is +// ConnectionResetSharedKey is the virtual network connection reset shared key type ConnectionResetSharedKey struct { autorest.Response `json:"-"` KeyLength *int32 `json:"keyLength,omitempty"` @@ -955,9 +1384,9 @@ type ConnectionSharedKey struct { Value *string `json:"value,omitempty"` } -// DhcpOptions is dhcpOptions contains an array of DNS servers available to -// VMs deployed in the virtual network. Standard DHCP option for a subnet -// overrides VNET DHCP options. +// DhcpOptions is dhcpOptions contains an array of DNS servers available to VMs +// deployed in the virtual network. Standard DHCP option for a subnet overrides +// VNET DHCP options. type DhcpOptions struct { DNSServers *[]string `json:"dnsServers,omitempty"` } @@ -1109,8 +1538,8 @@ type ExpressRouteCircuitPeeringConfig struct { RoutingRegistryName *string `json:"routingRegistryName,omitempty"` } -// ExpressRouteCircuitPeeringListResult is response for ListPeering API -// service call retrieves all peerings that belong to an ExpressRouteCircuit. 
+// ExpressRouteCircuitPeeringListResult is response for ListPeering API service +// call retrieves all peerings that belong to an ExpressRouteCircuit. type ExpressRouteCircuitPeeringListResult struct { autorest.Response `json:"-"` Value *[]ExpressRouteCircuitPeering `json:"value,omitempty"` @@ -1146,6 +1575,7 @@ type ExpressRouteCircuitPeeringPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` LastModifiedBy *string `json:"lastModifiedBy,omitempty"` + RouteFilter *RouteFilter `json:"routeFilter,omitempty"` } // ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit. @@ -1165,11 +1595,11 @@ type ExpressRouteCircuitPropertiesFormat struct { // ExpressRouteCircuitRoutesTable is the routes table associated with the // ExpressRouteCircuit type ExpressRouteCircuitRoutesTable struct { - Network *string `json:"network,omitempty"` - NextHop *string `json:"nextHop,omitempty"` - LocPrf *string `json:"locPrf,omitempty"` - Weight *int32 `json:"weight,omitempty"` - Path *string `json:"path,omitempty"` + NetworkProperty *string `json:"network,omitempty"` + NextHop *string `json:"nextHop,omitempty"` + LocPrf *string `json:"locPrf,omitempty"` + Weight *int32 `json:"weight,omitempty"` + Path *string `json:"path,omitempty"` } // ExpressRouteCircuitRoutesTableSummary is the routes table associated with @@ -1275,6 +1705,26 @@ type ExpressRouteServiceProviderPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// FlowLogInformation is information on the configuration of flow log. +type FlowLogInformation struct { + autorest.Response `json:"-"` + TargetResourceID *string `json:"targetResourceId,omitempty"` + *FlowLogProperties `json:"properties,omitempty"` +} + +// FlowLogProperties is parameters that define the configuration of flow log. 
+type FlowLogProperties struct { + StorageID *string `json:"storageId,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + RetentionPolicy *RetentionPolicyParameters `json:"retentionPolicy,omitempty"` +} + +// FlowLogStatusParameters is parameters that define a resource to query flow +// log status. +type FlowLogStatusParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` +} + // FrontendIPConfiguration is frontend IP address of the load balancer. type FrontendIPConfiguration struct { ID *string `json:"id,omitempty"` @@ -1297,6 +1747,23 @@ type FrontendIPConfigurationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// GatewayRoute is gateway routing details +type GatewayRoute struct { + LocalAddress *string `json:"localAddress,omitempty"` + NetworkProperty *string `json:"network,omitempty"` + NextHop *string `json:"nextHop,omitempty"` + SourcePeer *string `json:"sourcePeer,omitempty"` + Origin *string `json:"origin,omitempty"` + AsPath *string `json:"asPath,omitempty"` + Weight *int32 `json:"weight,omitempty"` +} + +// GatewayRouteListResult is list of virtual network gateway routes +type GatewayRouteListResult struct { + autorest.Response `json:"-"` + Value *[]GatewayRoute `json:"value,omitempty"` +} + // InboundNatPool is inbound NAT pool of the load balancer. type InboundNatPool struct { ID *string `json:"id,omitempty"` @@ -1347,6 +1814,12 @@ type Interface struct { Etag *string `json:"etag,omitempty"` } +// InterfaceAssociation is network interface and its custom security rules. +type InterfaceAssociation struct { + ID *string `json:"id,omitempty"` + SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` +} + // InterfaceDNSSettings is dNS settings of a network interface. 
type InterfaceDNSSettings struct { DNSServers *[]string `json:"dnsServers,omitempty"` @@ -1437,6 +1910,19 @@ type IPConfigurationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// IpsecPolicy is an IPSec Policy configuration for a virtual network gateway +// connection +type IpsecPolicy struct { + SaLifeTimeSeconds *int32 `json:"saLifeTimeSeconds,omitempty"` + SaDataSizeKilobytes *int32 `json:"saDataSizeKilobytes,omitempty"` + IpsecEncryption IpsecEncryption `json:"ipsecEncryption,omitempty"` + IpsecIntegrity IpsecIntegrity `json:"ipsecIntegrity,omitempty"` + IkeEncryption IkeEncryption `json:"ikeEncryption,omitempty"` + IkeIntegrity IkeIntegrity `json:"ikeIntegrity,omitempty"` + DhGroup DhGroup `json:"dhGroup,omitempty"` + PfsGroup PfsGroup `json:"pfsGroup,omitempty"` +} + // LoadBalancer is loadBalancer resource type LoadBalancer struct { autorest.Response `json:"-"` @@ -1544,6 +2030,23 @@ type LocalNetworkGatewayPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// NextHopParameters is parameters that define the source and destination +// endpoint. +type NextHopParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` + SourceIPAddress *string `json:"sourceIPAddress,omitempty"` + DestinationIPAddress *string `json:"destinationIPAddress,omitempty"` + TargetNicResourceID *string `json:"targetNicResourceId,omitempty"` +} + +// NextHopResult is the information about next hop from the specified VM. +type NextHopResult struct { + autorest.Response `json:"-"` + NextHopType NextHopType `json:"nextHopType,omitempty"` + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty"` + RouteTableID *string `json:"routeTableId,omitempty"` +} + // OutboundNatRule is outbound NAT pool of the load balancer. 
type OutboundNatRule struct { ID *string `json:"id,omitempty"` @@ -1560,6 +2063,97 @@ type OutboundNatRulePropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// PacketCapture is parameters that define the create packet capture operation. +type PacketCapture struct { + *PacketCaptureParameters `json:"properties,omitempty"` +} + +// PacketCaptureFilter is filter that is applied to packet capture request. +// Multiple filters can be applied. +type PacketCaptureFilter struct { + Protocol PcProtocol `json:"protocol,omitempty"` + LocalIPAddress *string `json:"localIPAddress,omitempty"` + RemoteIPAddress *string `json:"remoteIPAddress,omitempty"` + LocalPort *string `json:"localPort,omitempty"` + RemotePort *string `json:"remotePort,omitempty"` +} + +// PacketCaptureListResult is list of packet capture sessions. +type PacketCaptureListResult struct { + autorest.Response `json:"-"` + Value *[]PacketCaptureResult `json:"value,omitempty"` +} + +// PacketCaptureParameters is parameters that define the create packet capture +// operation. +type PacketCaptureParameters struct { + Target *string `json:"target,omitempty"` + BytesToCapturePerPacket *int32 `json:"bytesToCapturePerPacket,omitempty"` + TotalBytesPerSession *int32 `json:"totalBytesPerSession,omitempty"` + TimeLimitInSeconds *int32 `json:"timeLimitInSeconds,omitempty"` + StorageLocation *PacketCaptureStorageLocation `json:"storageLocation,omitempty"` + Filters *[]PacketCaptureFilter `json:"filters,omitempty"` +} + +// PacketCaptureQueryStatusResult is status of packet capture session. 
+type PacketCaptureQueryStatusResult struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + CaptureStartTime *date.Time `json:"captureStartTime,omitempty"` + PacketCaptureStatus PcStatus `json:"packetCaptureStatus,omitempty"` + StopReason *string `json:"stopReason,omitempty"` + PacketCaptureError *[]PcError `json:"packetCaptureError,omitempty"` +} + +// PacketCaptureResult is information about packet capture session. +type PacketCaptureResult struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + Etag *string `json:"etag,omitempty"` + *PacketCaptureResultProperties `json:"properties,omitempty"` +} + +// PacketCaptureResultProperties is describes the properties of a packet +// capture session. +type PacketCaptureResultProperties struct { + Target *string `json:"target,omitempty"` + BytesToCapturePerPacket *int32 `json:"bytesToCapturePerPacket,omitempty"` + TotalBytesPerSession *int32 `json:"totalBytesPerSession,omitempty"` + TimeLimitInSeconds *int32 `json:"timeLimitInSeconds,omitempty"` + StorageLocation *PacketCaptureStorageLocation `json:"storageLocation,omitempty"` + Filters *[]PacketCaptureFilter `json:"filters,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} + +// PacketCaptureStorageLocation is describes the storage location for a packet +// capture session. +type PacketCaptureStorageLocation struct { + StorageID *string `json:"storageId,omitempty"` + StoragePath *string `json:"storagePath,omitempty"` + FilePath *string `json:"filePath,omitempty"` +} + +// PatchRouteFilter is route Filter Resource. 
+type PatchRouteFilter struct { + ID *string `json:"id,omitempty"` + *RouteFilterPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` + Type *string `json:"type,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// PatchRouteFilterRule is route Filter Rule Resource +type PatchRouteFilterRule struct { + ID *string `json:"id,omitempty"` + *RouteFilterRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + // Probe is a load balancer probe. type Probe struct { ID *string `json:"id,omitempty"` @@ -1591,8 +2185,8 @@ type PublicIPAddress struct { Etag *string `json:"etag,omitempty"` } -// PublicIPAddressDNSSettings is contains the FQDN of the DNS record -// associated with the public IP address. +// PublicIPAddressDNSSettings is contains FQDN of the DNS record associated +// with the public IP address type PublicIPAddressDNSSettings struct { DomainNameLabel *string `json:"domainNameLabel,omitempty"` Fqdn *string `json:"fqdn,omitempty"` @@ -1631,6 +2225,12 @@ type PublicIPAddressPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// QueryTroubleshootingParameters is parameters that define the resource to +// query the troubleshooting result. +type QueryTroubleshootingParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` +} + // Resource is type Resource struct { ID *string `json:"id,omitempty"` @@ -1655,6 +2255,13 @@ type ResourceNavigationLinkFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// RetentionPolicyParameters is parameters that define the retention policy for +// flow log. 
+type RetentionPolicyParameters struct { + Days *int32 `json:"days,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + // Route is route resource type Route struct { autorest.Response `json:"-"` @@ -1664,6 +2271,83 @@ type Route struct { Etag *string `json:"etag,omitempty"` } +// RouteFilter is route Filter Resource. +type RouteFilter struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *RouteFilterPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// RouteFilterListResult is response for the ListRouteFilters API service call. +type RouteFilterListResult struct { + autorest.Response `json:"-"` + Value *[]RouteFilter `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteFilterListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RouteFilterListResult) RouteFilterListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RouteFilterPropertiesFormat is route Filter Resource +type RouteFilterPropertiesFormat struct { + Rules *[]RouteFilterRule `json:"rules,omitempty"` + Peerings *[]ExpressRouteCircuitPeering `json:"peerings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// RouteFilterRule is route Filter Rule Resource +type RouteFilterRule struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *RouteFilterRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Etag *string `json:"etag,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// RouteFilterRuleListResult is response for the ListRouteFilterRules API +// service call +type RouteFilterRuleListResult struct { + autorest.Response `json:"-"` + Value *[]RouteFilterRule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteFilterRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RouteFilterRuleListResult) RouteFilterRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RouteFilterRulePropertiesFormat is route Filter Rule Resource +type RouteFilterRulePropertiesFormat struct { + Access Access `json:"access,omitempty"` + RouteFilterRuleType *string `json:"routeFilterRuleType,omitempty"` + Communities *[]string `json:"communities,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // RouteListResult is response for the ListRoute API service call type RouteListResult struct { autorest.Response `json:"-"` @@ -1761,6 +2445,13 @@ func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.R autorest.WithBaseURL(to.String(client.NextLink))) } +// SecurityGroupNetworkInterface is network interface and all its associated +// security rules. +type SecurityGroupNetworkInterface struct { + ID *string `json:"id,omitempty"` + SecurityRuleAssociations *SecurityRuleAssociations `json:"securityRuleAssociations,omitempty"` +} + // SecurityGroupPropertiesFormat is network Security Group resource. type SecurityGroupPropertiesFormat struct { SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` @@ -1771,6 +2462,19 @@ type SecurityGroupPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// SecurityGroupViewParameters is parameters that define the VM to check +// security groups for. +type SecurityGroupViewParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` +} + +// SecurityGroupViewResult is the information about security rules applied to +// the specified VM. 
+type SecurityGroupViewResult struct { + autorest.Response `json:"-"` + NetworkInterfaces *[]SecurityGroupNetworkInterface `json:"networkInterfaces,omitempty"` +} + // SecurityRule is network security rule. type SecurityRule struct { autorest.Response `json:"-"` @@ -1780,6 +2484,15 @@ type SecurityRule struct { Etag *string `json:"etag,omitempty"` } +// SecurityRuleAssociations is all security rules associated with the network +// interface. +type SecurityRuleAssociations struct { + NetworkInterfaceAssociation *InterfaceAssociation `json:"networkInterfaceAssociation,omitempty"` + SubnetAssociation *SubnetAssociation `json:"subnetAssociation,omitempty"` + DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"` + EffectiveSecurityRules *[]EffectiveNetworkSecurityRule `json:"effectiveSecurityRules,omitempty"` +} + // SecurityRuleListResult is response for ListSecurityRule API service call. // Retrieves all security rules that belongs to a network security group. type SecurityRuleListResult struct { @@ -1829,6 +2542,12 @@ type Subnet struct { Etag *string `json:"etag,omitempty"` } +// SubnetAssociation is network interface and its custom security rules. +type SubnetAssociation struct { + ID *string `json:"id,omitempty"` + SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` +} + // SubnetListResult is response for ListSubnets API service callRetrieves all // subnet that belongs to a virtual network type SubnetListResult struct { @@ -1864,6 +2583,79 @@ type SubResource struct { ID *string `json:"id,omitempty"` } +// Topology is topology of the specified resource group. +type Topology struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + CreatedDateTime *date.Time `json:"createdDateTime,omitempty"` + LastModified *date.Time `json:"lastModified,omitempty"` + Resources *[]TopologyResource `json:"resources,omitempty"` +} + +// TopologyAssociation is resources that have an association with the parent +// resource. 
+type TopologyAssociation struct { + Name *string `json:"name,omitempty"` + ResourceID *string `json:"resourceId,omitempty"` + AssociationType AssociationType `json:"associationType,omitempty"` +} + +// TopologyParameters is parameters that define the representation of topology. +type TopologyParameters struct { + TargetResourceGroupName *string `json:"targetResourceGroupName,omitempty"` +} + +// TopologyResource is the network resource topology information for the given +// resource group. +type TopologyResource struct { + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + Location *string `json:"location,omitempty"` + Associations *[]TopologyAssociation `json:"associations,omitempty"` +} + +// TroubleshootingDetails is information gained from troubleshooting of +// specified resource. +type TroubleshootingDetails struct { + ID *string `json:"id,omitempty"` + ReasonType *string `json:"reasonType,omitempty"` + Summary *string `json:"summary,omitempty"` + Detail *string `json:"detail,omitempty"` + RecommendedActions *[]TroubleshootingRecommendedActions `json:"recommendedActions,omitempty"` +} + +// TroubleshootingParameters is parameters that define the resource to +// troubleshoot. +type TroubleshootingParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` + *TroubleshootingProperties `json:"properties,omitempty"` +} + +// TroubleshootingProperties is storage location provided for troubleshoot. +type TroubleshootingProperties struct { + StorageID *string `json:"storageId,omitempty"` + StoragePath *string `json:"storagePath,omitempty"` +} + +// TroubleshootingRecommendedActions is recommended actions based on discovered +// issues. 
+type TroubleshootingRecommendedActions struct { + ActionID *string `json:"actionId,omitempty"` + ActionText *string `json:"actionText,omitempty"` + ActionURI *string `json:"actionUri,omitempty"` + ActionURIText *string `json:"actionUriText,omitempty"` +} + +// TroubleshootingResult is troubleshooting information gained from specified +// resource. +type TroubleshootingResult struct { + autorest.Response `json:"-"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Code *string `json:"code,omitempty"` + Results *[]TroubleshootingDetails `json:"results,omitempty"` +} + // TunnelConnectionHealth is virtualNetworkGatewayConnection properties type TunnelConnectionHealth struct { Tunnel *string `json:"tunnel,omitempty"` @@ -1906,6 +2698,27 @@ func (client UsagesListResult) UsagesListResultPreparer() (*http.Request, error) autorest.WithBaseURL(to.String(client.NextLink))) } +// VerificationIPFlowParameters is parameters that define the IP flow to be +// verified. +type VerificationIPFlowParameters struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` + Direction Direction `json:"direction,omitempty"` + Protocol Protocol `json:"protocol,omitempty"` + LocalPort *string `json:"localPort,omitempty"` + RemotePort *string `json:"remotePort,omitempty"` + LocalIPAddress *string `json:"localIPAddress,omitempty"` + RemoteIPAddress *string `json:"remoteIPAddress,omitempty"` + TargetNicResourceID *string `json:"targetNicResourceId,omitempty"` +} + +// VerificationIPFlowResult is results of IP flow verification on the target +// resource. +type VerificationIPFlowResult struct { + autorest.Response `json:"-"` + Access Access `json:"access,omitempty"` + RuleName *string `json:"ruleName,omitempty"` +} + // VirtualNetwork is virtual Network resource. 
type VirtualNetwork struct { autorest.Response `json:"-"` @@ -1966,25 +2779,27 @@ func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayCon // VirtualNetworkGatewayConnectionPropertiesFormat is // virtualNetworkGatewayConnection properties type VirtualNetworkGatewayConnectionPropertiesFormat struct { - AuthorizationKey *string `json:"authorizationKey,omitempty"` - VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` - VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` - LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` - ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` - RoutingWeight *int32 `json:"routingWeight,omitempty"` - SharedKey *string `json:"sharedKey,omitempty"` - ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` - TunnelConnectionStatus *[]TunnelConnectionHealth `json:"tunnelConnectionStatus,omitempty"` - EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` - IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` - Peer *SubResource `json:"peer,omitempty"` - EnableBgp *bool `json:"enableBgp,omitempty"` - ResourceGUID *string `json:"resourceGuid,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` -} - -// VirtualNetworkGatewayIPConfiguration is iP configuration for virtual -// network gateway + AuthorizationKey *string `json:"authorizationKey,omitempty"` + VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` + VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` + LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` + ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` + RoutingWeight *int32 `json:"routingWeight,omitempty"` + SharedKey *string 
`json:"sharedKey,omitempty"` + ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` + TunnelConnectionStatus *[]TunnelConnectionHealth `json:"tunnelConnectionStatus,omitempty"` + EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` + IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` + Peer *SubResource `json:"peer,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + UsePolicyBasedTrafficSelectors *bool `json:"usePolicyBasedTrafficSelectors,omitempty"` + IpsecPolicies *[]IpsecPolicy `json:"ipsecPolicies,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayIPConfiguration is iP configuration for virtual network +// gateway type VirtualNetworkGatewayIPConfiguration struct { ID *string `json:"id,omitempty"` *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` @@ -2043,8 +2858,8 @@ type VirtualNetworkGatewaySku struct { Capacity *int32 `json:"capacity,omitempty"` } -// VirtualNetworkListResult is response for the ListVirtualNetworks API -// service call. +// VirtualNetworkListResult is response for the ListVirtualNetworks API service +// call. 
type VirtualNetworkListResult struct { autorest.Response `json:"-"` Value *[]VirtualNetwork `json:"value,omitempty"` @@ -2120,9 +2935,9 @@ type VpnClientConfiguration struct { VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` } -// VpnClientParameters is vpnClientParameters +// VpnClientParameters is vpn Client Parameters for package generation type VpnClientParameters struct { - ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"` + ProcessorArchitecture ProcessorArchitecture `json:"processorArchitecture,omitempty"` } // VpnClientRevokedCertificate is vPN client revoked certificate of virtual @@ -2134,8 +2949,8 @@ type VpnClientRevokedCertificate struct { Etag *string `json:"etag,omitempty"` } -// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked -// VPN client certificate of virtual network gateway. +// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked VPN +// client certificate of virtual network gateway. type VpnClientRevokedCertificatePropertiesFormat struct { Thumbprint *string `json:"thumbprint,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` @@ -2156,3 +2971,26 @@ type VpnClientRootCertificatePropertiesFormat struct { PublicCertData *string `json:"publicCertData,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` } + +// Watcher is network watcher in a resource group. +type Watcher struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Etag *string `json:"etag,omitempty"` + *WatcherPropertiesFormat `json:"properties,omitempty"` +} + +// WatcherListResult is list of network watcher resources. 
+type WatcherListResult struct { + autorest.Response `json:"-"` + Value *[]Watcher `json:"value,omitempty"` +} + +// WatcherPropertiesFormat is the network watcher properties. +type WatcherPropertiesFormat struct { + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go new file mode 100755 index 000000000000..fbeb0d9ef4b1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go @@ -0,0 +1,526 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// PacketCapturesClient is the composite Swagger for Network Client +type PacketCapturesClient struct { + ManagementClient +} + +// NewPacketCapturesClient creates an instance of the PacketCapturesClient +// client. 
+func NewPacketCapturesClient(subscriptionID string) PacketCapturesClient { + return NewPacketCapturesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPacketCapturesClientWithBaseURI creates an instance of the +// PacketCapturesClient client. +func NewPacketCapturesClientWithBaseURI(baseURI string, subscriptionID string) PacketCapturesClient { + return PacketCapturesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Create create and start a packet capture on the specified VM. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. packetCaptureName is the name of the packet +// capture session. parameters is parameters that define the create packet +// capture operation. +func (client PacketCapturesClient) Create(resourceGroupName string, networkWatcherName string, packetCaptureName string, parameters PacketCapture, cancel <-chan struct{}) (<-chan PacketCaptureResult, <-chan error) { + resultChan := make(chan PacketCaptureResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.PacketCaptureParameters", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.PacketCaptureParameters.Target", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.PacketCaptureParameters.StorageLocation", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.PacketCapturesClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result PacketCaptureResult + defer func() { + resultChan <- 
result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, networkWatcherName, packetCaptureName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Create", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreatePreparer prepares the Create request. +func (client PacketCapturesClient) CreatePreparer(resourceGroupName string, networkWatcherName string, packetCaptureName string, parameters PacketCapture, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "packetCaptureName": autorest.Encode("path", packetCaptureName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the 
Create request. The method will close the +// http.Response Body if it receives an error. +func (client PacketCapturesClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client PacketCapturesClient) CreateResponder(resp *http.Response) (result PacketCaptureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified packet capture session. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. packetCaptureName is the name of the packet +// capture session. 
+func (client PacketCapturesClient) Delete(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, networkWatcherName, packetCaptureName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client PacketCapturesClient) DeletePreparer(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "packetCaptureName": autorest.Encode("path", packetCaptureName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PacketCapturesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PacketCapturesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a packet capture session by name. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. 
packetCaptureName is the name of the packet +// capture session. +func (client PacketCapturesClient) Get(resourceGroupName string, networkWatcherName string, packetCaptureName string) (result PacketCaptureResult, err error) { + req, err := client.GetPreparer(resourceGroupName, networkWatcherName, packetCaptureName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client PacketCapturesClient) GetPreparer(resourceGroupName string, networkWatcherName string, packetCaptureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "packetCaptureName": autorest.Encode("path", packetCaptureName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client PacketCapturesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PacketCapturesClient) GetResponder(resp *http.Response) (result PacketCaptureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStatus query the status of a running packet capture session. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the Network Watcher resource. packetCaptureName is the name +// given to the packet capture session. 
+func (client PacketCapturesClient) GetStatus(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan PacketCaptureQueryStatusResult, <-chan error) { + resultChan := make(chan PacketCaptureQueryStatusResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result PacketCaptureQueryStatusResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetStatusPreparer(resourceGroupName, networkWatcherName, packetCaptureName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "GetStatus", nil, "Failure preparing request") + return + } + + resp, err := client.GetStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "GetStatus", resp, "Failure sending request") + return + } + + result, err = client.GetStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "GetStatus", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetStatusPreparer prepares the GetStatus request. 
+func (client PacketCapturesClient) GetStatusPreparer(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "packetCaptureName": autorest.Encode("path", packetCaptureName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetStatusSender sends the GetStatus request. The method will close the +// http.Response Body if it receives an error. +func (client PacketCapturesClient) GetStatusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetStatusResponder handles the response to the GetStatus request. The method always +// closes the http.Response Body. +func (client PacketCapturesClient) GetStatusResponder(resp *http.Response) (result PacketCaptureQueryStatusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all packet capture sessions within the specified resource group. 
+// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the Network Watcher resource. +func (client PacketCapturesClient) List(resourceGroupName string, networkWatcherName string) (result PacketCaptureListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, networkWatcherName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client PacketCapturesClient) ListPreparer(resourceGroupName string, networkWatcherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client PacketCapturesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PacketCapturesClient) ListResponder(resp *http.Response) (result PacketCaptureListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Stop stops a specified packet capture session. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. packetCaptureName is the name of the packet +// capture session. 
+func (client PacketCapturesClient) Stop(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.StopPreparer(resourceGroupName, networkWatcherName, packetCaptureName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Stop", nil, "Failure preparing request") + return + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Stop", resp, "Failure sending request") + return + } + + result, err = client.StopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PacketCapturesClient", "Stop", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// StopPreparer prepares the Stop request. 
+func (client PacketCapturesClient) StopPreparer(resourceGroupName string, networkWatcherName string, packetCaptureName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "packetCaptureName": autorest.Encode("path", packetCaptureName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client PacketCapturesClient) StopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. 
+func (client PacketCapturesClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go old mode 100644 new mode 100755 index d8ef099a79cb..896bc817d91e --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// PublicIPAddressesClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// PublicIPAddressesClient is the composite Swagger for Network Client type PublicIPAddressesClient struct { ManagementClient } @@ -47,59 +43,58 @@ func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string } // CreateOrUpdate creates or updates a static or dynamic public IP address. -// This method may poll for completion. Polling can be canceled by passing -// the cancel channel argument. The channel will be used to cancel polling -// and any outstanding HTTP requests. +// This method may poll for completion. 
Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. publicIPAddressName is -// the name of the public IP address. parameters is parameters supplied to -// the create or update public IP address operation. -func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (result autorest.Response, err error) { +// the name of the public IP address. parameters is parameters supplied to the +// create or update public IP address operation. +func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (<-chan PublicIPAddress, <-chan error) { + resultChan := make(chan PublicIPAddress, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, - {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, - }}, - {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, - {Target: 
"parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}, - }}, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}}}, }}, - {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.ReadOnly, Rule: true, Chain: nil}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.PublicIPAddressesClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "network.PublicIPAddressesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result PublicIPAddress + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := 
client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -110,8 +105,9 @@ func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName s "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -134,41 +130,55 @@ func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (* // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result PublicIPAddress, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified public IP address. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. 
+// The channel will be used to cancel polling and any outstanding HTTP +// requests. // // resourceGroupName is the name of the resource group. publicIPAddressName is // the name of the subnet. -func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure responding to request") - } - - return +func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"network.PublicIPAddressesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -179,8 +189,9 @@ func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, p "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -218,13 +229,15 @@ func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (resu func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) { req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -243,8 +256,9 @@ func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -283,13 +297,15 @@ func (client PublicIPAddressesClient) GetResponder(resp 
*http.Response) (result func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -307,8 +323,9 @@ func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*h "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -366,13 +383,15 @@ func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddres func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, 
"network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -389,8 +408,9 @@ func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go new file mode 100755 index 000000000000..378a75bb2f12 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go @@ -0,0 +1,468 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// RouteFilterRulesClient is the composite Swagger for Network Client +type RouteFilterRulesClient struct { + ManagementClient +} + +// NewRouteFilterRulesClient creates an instance of the RouteFilterRulesClient +// client. +func NewRouteFilterRulesClient(subscriptionID string) RouteFilterRulesClient { + return NewRouteFilterRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteFilterRulesClientWithBaseURI creates an instance of the +// RouteFilterRulesClient client. +func NewRouteFilterRulesClientWithBaseURI(baseURI string, subscriptionID string) RouteFilterRulesClient { + return RouteFilterRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a route in the specified route filter. +// This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. ruleName is the name of the route filter rule. +// routeFilterRuleParameters is parameters supplied to the create or update +// route filter rule operation. 
+func (client RouteFilterRulesClient) CreateOrUpdate(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters RouteFilterRule, cancel <-chan struct{}) (<-chan RouteFilterRule, <-chan error) { + resultChan := make(chan RouteFilterRule, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: routeFilterRuleParameters, + Constraints: []validation.Constraint{{Target: "routeFilterRuleParameters.RouteFilterRulePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "routeFilterRuleParameters.RouteFilterRulePropertiesFormat.RouteFilterRuleType", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "routeFilterRuleParameters.RouteFilterRulePropertiesFormat.Communities", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.RouteFilterRulesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result RouteFilterRule + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeFilterName, ruleName, routeFilterRuleParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, 
errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RouteFilterRulesClient) CreateOrUpdatePreparer(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters RouteFilterRule, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}", pathParameters), + autorest.WithJSON(routeFilterRuleParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client RouteFilterRulesClient) CreateOrUpdateResponder(resp *http.Response) (result RouteFilterRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified rule from a route filter. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. ruleName is the name of the rule. +func (client RouteFilterRulesClient) Delete(resourceGroupName string, routeFilterName string, ruleName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, routeFilterName, ruleName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client RouteFilterRulesClient) DeletePreparer(resourceGroupName string, routeFilterName string, ruleName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RouteFilterRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified rule from a route filter. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. ruleName is the name of the rule. 
+func (client RouteFilterRulesClient) Get(resourceGroupName string, routeFilterName string, ruleName string) (result RouteFilterRule, err error) { + req, err := client.GetPreparer(resourceGroupName, routeFilterName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RouteFilterRulesClient) GetPreparer(resourceGroupName string, routeFilterName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client RouteFilterRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RouteFilterRulesClient) GetResponder(resp *http.Response) (result RouteFilterRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByRouteFilter gets all RouteFilterRules in a route filter. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. +func (client RouteFilterRulesClient) ListByRouteFilter(resourceGroupName string, routeFilterName string) (result RouteFilterRuleListResult, err error) { + req, err := client.ListByRouteFilterPreparer(resourceGroupName, routeFilterName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", nil, "Failure preparing request") + return + } + + resp, err := client.ListByRouteFilterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", resp, "Failure sending request") + return + } + + result, err = client.ListByRouteFilterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", resp, "Failure responding to request") + } + + return +} + +// ListByRouteFilterPreparer prepares the ListByRouteFilter request. 
+func (client RouteFilterRulesClient) ListByRouteFilterPreparer(resourceGroupName string, routeFilterName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByRouteFilterSender sends the ListByRouteFilter request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFilterRulesClient) ListByRouteFilterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByRouteFilterResponder handles the response to the ListByRouteFilter request. The method always +// closes the http.Response Body. +func (client RouteFilterRulesClient) ListByRouteFilterResponder(resp *http.Response) (result RouteFilterRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByRouteFilterNextResults retrieves the next set of results, if any. 
+func (client RouteFilterRulesClient) ListByRouteFilterNextResults(lastResults RouteFilterRuleListResult) (result RouteFilterRuleListResult, err error) { + req, err := lastResults.RouteFilterRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByRouteFilterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", resp, "Failure sending next results request") + } + + result, err = client.ListByRouteFilterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "ListByRouteFilter", resp, "Failure responding to next results request") + } + + return +} + +// Update updates a route in the specified route filter. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. ruleName is the name of the route filter rule. +// routeFilterRuleParameters is parameters supplied to the update route filter +// rule operation. 
+func (client RouteFilterRulesClient) Update(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters PatchRouteFilterRule, cancel <-chan struct{}) (<-chan RouteFilterRule, <-chan error) { + resultChan := make(chan RouteFilterRule, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result RouteFilterRule + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, routeFilterName, ruleName, routeFilterRuleParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFilterRulesClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client RouteFilterRulesClient) UpdatePreparer(resourceGroupName string, routeFilterName string, ruleName string, routeFilterRuleParameters PatchRouteFilterRule, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}", pathParameters), + autorest.WithJSON(routeFilterRuleParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client RouteFilterRulesClient) UpdateResponder(resp *http.Response) (result RouteFilterRule, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go new file mode 100755 index 000000000000..860ccae5c394 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go @@ -0,0 +1,535 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// RouteFiltersClient is the composite Swagger for Network Client +type RouteFiltersClient struct { + ManagementClient +} + +// NewRouteFiltersClient creates an instance of the RouteFiltersClient client. 
+func NewRouteFiltersClient(subscriptionID string) RouteFiltersClient { + return NewRouteFiltersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteFiltersClientWithBaseURI creates an instance of the +// RouteFiltersClient client. +func NewRouteFiltersClientWithBaseURI(baseURI string, subscriptionID string) RouteFiltersClient { + return RouteFiltersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a route filter in a specified resource +// group. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. routeFilterParameters is parameters supplied to +// the create or update route filter operation. +func (client RouteFiltersClient) CreateOrUpdate(resourceGroupName string, routeFilterName string, routeFilterParameters RouteFilter, cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { + resultChan := make(chan RouteFilter, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result RouteFilter + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeFilterName, routeFilterParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", 
"CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RouteFiltersClient) CreateOrUpdatePreparer(resourceGroupName string, routeFilterName string, routeFilterParameters RouteFilter, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}", pathParameters), + autorest.WithJSON(routeFilterParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client RouteFiltersClient) CreateOrUpdateResponder(resp *http.Response) (result RouteFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified route filter. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. +func (client RouteFiltersClient) Delete(resourceGroupName string, routeFilterName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, routeFilterName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client RouteFiltersClient) DeletePreparer(resourceGroupName string, routeFilterName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RouteFiltersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified route filter. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. expand is expands referenced express route bgp +// peering resources. 
+func (client RouteFiltersClient) Get(resourceGroupName string, routeFilterName string, expand string) (result RouteFilter, err error) { + req, err := client.GetPreparer(resourceGroupName, routeFilterName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RouteFiltersClient) GetPreparer(resourceGroupName string, routeFilterName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client RouteFiltersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RouteFiltersClient) GetResponder(resp *http.Response) (result RouteFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all route filters in a subscription. +func (client RouteFiltersClient) List() (result RouteFilterListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client RouteFiltersClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFiltersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RouteFiltersClient) ListResponder(resp *http.Response) (result RouteFilterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client RouteFiltersClient) ListNextResults(lastResults RouteFilterListResult) (result RouteFilterListResult, err error) { + req, err := lastResults.RouteFilterListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup gets all route filters in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client RouteFiltersClient) ListByResourceGroup(resourceGroupName string) (result RouteFilterListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client RouteFiltersClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFiltersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client RouteFiltersClient) ListByResourceGroupResponder(resp *http.Response) (result RouteFilterListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client RouteFiltersClient) ListByResourceGroupNextResults(lastResults RouteFilterListResult) (result RouteFilterListResult, err error) { + req, err := lastResults.RouteFilterListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// Update updates a route filter in a specified resource group. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. routeFilterName is the +// name of the route filter. routeFilterParameters is parameters supplied to +// the update route filter operation. 
+func (client RouteFiltersClient) Update(resourceGroupName string, routeFilterName string, routeFilterParameters PatchRouteFilter, cancel <-chan struct{}) (<-chan RouteFilter, <-chan error) { + resultChan := make(chan RouteFilter, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result RouteFilter + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, routeFilterName, routeFilterParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteFiltersClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
+func (client RouteFiltersClient) UpdatePreparer(resourceGroupName string, routeFilterName string, routeFilterParameters PatchRouteFilter, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeFilterName": autorest.Encode("path", routeFilterName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}", pathParameters), + autorest.WithJSON(routeFilterParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RouteFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client RouteFiltersClient) UpdateResponder(resp *http.Response) (result RouteFilter, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go old mode 100644 new mode 100755 index 9aa1cf5054dc..1366d3c14472 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,7 @@ import ( "net/http" ) -// RoutesClient is the the Microsoft Azure Network management API provides a -// RESTful set of web services that interact with Microsoft Azure Networks -// service to manage your network resources. The API has entities that -// capture the relationship between an end user and the Microsoft Azure -// Networks service. +// RoutesClient is the composite Swagger for Network Client type RoutesClient struct { ManagementClient } @@ -43,33 +39,45 @@ func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesCli return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a route in the specified route table. -// This method may poll for completion. Polling can be canceled by passing -// the cancel channel argument. The channel will be used to cancel polling -// and any outstanding HTTP requests. 
+// CreateOrUpdate creates or updates a route in the specified route table. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. routeName is the name of the route. -// routeParameters is parameters supplied to the create or update route -// operation. -func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// name of the route table. routeName is the name of the route. routeParameters +// is parameters supplied to the create or update route operation. 
+func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (<-chan Route, <-chan error) { + resultChan := make(chan Route, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Route + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -81,8 +89,9 @@ func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, rout "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -105,13 +114,14 @@ func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Respon // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result Route, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -122,24 +132,37 @@ func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. routeName is the name of the route. -func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure responding to request") - } - - return +func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, 
routeTableName, routeName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -151,8 +174,9 @@ func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableNa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -190,13 +214,15 @@ func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, err error) { req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -216,8 +242,9 @@ func (client RoutesClient) GetPreparer(resourceGroupName 
string, routeTableName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -254,13 +281,15 @@ func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, err error) { req, err := client.ListPreparer(resourceGroupName, routeTableName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -279,8 +308,9 @@ func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go old mode 100644 new mode 100755 index b53e94ebaab4..80339f207869 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -14,22 +14,17 @@ package network // See the License for the specific language governing permissions 
and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) -// RouteTablesClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// RouteTablesClient is the composite Swagger for Network Client type RouteTablesClient struct { ManagementClient } @@ -39,8 +34,8 @@ func NewRouteTablesClient(subscriptionID string) RouteTablesClient { return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRouteTablesClientWithBaseURI creates an instance of the -// RouteTablesClient client. +// NewRouteTablesClientWithBaseURI creates an instance of the RouteTablesClient +// client. func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -51,33 +46,39 @@ func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) Rout // polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. routeTableName is the -// name of the route table. parameters is parameters supplied to the create -// or update route table operation. 
-func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.RouteTablesClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +// name of the route table. parameters is parameters supplied to the create or +// update route table operation. 
+func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (<-chan RouteTable, <-chan error) { + resultChan := make(chan RouteTable, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result RouteTable + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -88,8 +89,9 @@ func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -112,41 +114,55 @@ func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.R // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result RouteTable, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified route table. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // // resourceGroupName is the name of the resource group. routeTableName is the // name of the route table. 
-func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, routeTableName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure responding to request") - } - - return +func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, routeTableName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
@@ -157,8 +173,9 @@ func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTa "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -196,13 +213,15 @@ func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result aut func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) { req, err := client.GetPreparer(resourceGroupName, routeTableName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -221,8 +240,9 @@ func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTable "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -261,13 +281,15 @@ func (client RouteTablesClient) GetResponder(resp *http.Response) (result RouteT func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, 
autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -285,8 +307,9 @@ func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -344,13 +367,15 @@ func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult func (client RouteTablesClient) ListAll() (result RouteTableListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -367,8 +392,9 @@ func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go old mode 100644 new mode 100755 index 37898c4d089c..3faaf007fffe --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -14,22 +14,17 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) -// SecurityGroupsClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// SecurityGroupsClient is the composite Swagger for Network Client type SecurityGroupsClient struct { ManagementClient } @@ -47,41 +42,45 @@ func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) S } // CreateOrUpdate creates or updates a network security group in the specified -// resource group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// resource group. 
This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. // parameters is parameters supplied to the create or update network security // group operation. -func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.SecurityGroupsClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client SecurityGroupsClient) 
CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (<-chan SecurityGroup, <-chan error) { + resultChan := make(chan SecurityGroup, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result SecurityGroup + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -92,8 +91,9 @@ func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName stri "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -116,13 +116,14 @@ func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*htt // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -133,24 +134,37 @@ func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. -func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := 
client.DeletePreparer(resourceGroupName, networkSecurityGroupName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -161,8 +175,9 @@ func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, netw "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -201,13 +216,15 @@ func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) { req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request") + 
return } result, err = client.GetResponder(resp) @@ -226,8 +243,9 @@ func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, network "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -266,13 +284,15 @@ func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result Sec func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -290,8 +310,9 @@ func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -349,13 +370,15 @@ func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupList func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, 
autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -372,8 +395,9 @@ func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go old mode 100644 new mode 100755 index 9603c3c1fba2..2e2738605f83 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// SecurityRulesClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. 
The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// SecurityRulesClient is the composite Swagger for Network Client type SecurityRulesClient struct { ManagementClient } @@ -47,42 +43,57 @@ func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) Se } // CreateOrUpdate creates or updates a security rule in the specified network -// security group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// security group. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. -// securityRuleName is the name of the security rule. securityRuleParameters -// is parameters supplied to the create or update network security rule -// operation. -func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (result autorest.Response, err error) { +// securityRuleName is the name of the security rule. securityRuleParameters is +// parameters supplied to the create or update network security rule operation. 
+func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (<-chan SecurityRule, <-chan error) { + resultChan := make(chan SecurityRule, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: securityRuleParameters, Constraints: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat.SourceAddressPrefix", Name: validation.Null, Rule: true, Chain: nil}, {Target: "securityRuleParameters.SecurityRulePropertiesFormat.DestinationAddressPrefix", Name: validation.Null, Rule: true, Chain: nil}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.SecurityRulesClient", "CreateOrUpdate") + errChan <- validation.NewErrorWithValidationError(err, "network.SecurityRulesClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan } - req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + go func() { + var err error + var result SecurityRule + defer func() { + resultChan <- result + errChan <- err + 
close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -94,8 +105,9 @@ func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -118,42 +130,56 @@ func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityRule, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Delete deletes the specified network security rule. This method may poll -// for completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// Delete deletes the specified network security rule. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // // resourceGroupName is the name of the resource group. // networkSecurityGroupName is the name of the network security group. // securityRuleName is the name of the security rule. 
-func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure responding to request") - } - - return +func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer 
prepares the Delete request. @@ -165,8 +191,9 @@ func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, netwo "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -205,13 +232,15 @@ func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result a func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) { req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -231,8 +260,9 @@ func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkS "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -269,13 +299,15 @@ func (client SecurityRulesClient) GetResponder(resp *http.Response) (result Secu func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) { req, err := 
client.ListPreparer(resourceGroupName, networkSecurityGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -294,8 +326,9 @@ func (client SecurityRulesClient) ListPreparer(resourceGroupName string, network "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go old mode 100644 new mode 100755 index 6230dbf03dc8..7d6ff882ae72 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -14,22 +14,17 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" "net/http" ) -// SubnetsClient is the the Microsoft Azure Network management API provides a -// RESTful set of web services that interact with Microsoft Azure Networks -// service to manage your network resources. The API has entities that -// capture the relationship between an end user and the Microsoft Azure -// Networks service. +// SubnetsClient is the composite Swagger for Network Client type SubnetsClient struct { ManagementClient } @@ -44,51 +39,46 @@ func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsC return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a subnet in the specified virtual -// network. This method may poll for completion. Polling can be canceled by -// passing the cancel channel argument. The channel will be used to cancel -// polling and any outstanding HTTP requests. +// CreateOrUpdate creates or updates a subnet in the specified virtual network. +// This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. subnetName is the name of the subnet. // subnetParameters is parameters supplied to the create or update subnet // operation. 
-func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (result autorest.Response, err error) { - if err := validation.Validate([]validation.Validation{ - {TargetValue: subnetParameters, - Constraints: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, - {Target: "subnetParameters.SubnetPropertiesFormat.RouteTable", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, - }}, - {Target: "subnetParameters.SubnetPropertiesFormat.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.SubnetsClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) - if err != nil { - return result, 
autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (<-chan Subnet, <-chan error) { + resultChan := make(chan Subnet, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Subnet + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
@@ -100,8 +90,9 @@ func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, vir "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -124,40 +115,54 @@ func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Respo // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result Subnet, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified subnet. This method may poll for completion. -// Polling can be canceled by passing the cancel channel argument. The -// channel will be used to cancel polling and any outstanding HTTP requests. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. subnetName is the name of the subnet. 
-func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
@@ -169,8 +174,9 @@ func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetw "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -209,13 +215,15 @@ func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autores func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -235,8 +243,9 @@ func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetwork "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -276,13 +285,15 @@ func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, er func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, err error) { req, err := client.ListPreparer(resourceGroupName, 
virtualNetworkName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -301,8 +312,9 @@ func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetwor "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go old mode 100644 new mode 100755 index 016de70e1c30..34fa0df4b428 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// UsagesClient is the the Microsoft Azure Network management API provides a -// RESTful set of web services that interact with Microsoft Azure Networks -// service to manage your network resources. 
The API has entities that -// capture the relationship between an end user and the Microsoft Azure -// Networks service. +// UsagesClient is the composite Swagger for Network Client type UsagesClient struct { ManagementClient } @@ -56,13 +52,15 @@ func (client UsagesClient) List(location string) (result UsagesListResult, err e req, err := client.ListPreparer(location) if err != nil { - return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -80,8 +78,9 @@ func (client UsagesClient) ListPreparer(location string) (*http.Request, error) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go old mode 100644 new mode 100755 index b0628fe0bd26..50b836179f85 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -14,30 +14,16 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. -import ( - "fmt" -) - -const ( - major = "7" - minor = "0" - patch = "1" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" -) - // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-09-01") + return "Azure-SDK-For-Go/v10.0.2-beta arm-network/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + return "v10.0.2-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go old mode 100644 new mode 100755 index d58a9d326fac..13e1392d2853 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,8 @@ import ( "net/http" ) -// VirtualNetworkGatewayConnectionsClient is the the Microsoft Azure Network -// management API provides a RESTful set of web services that interact with -// Microsoft Azure Networks service to manage your network resources. 
The API -// has entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. +// VirtualNetworkGatewayConnectionsClient is the composite Swagger for Network +// Client type VirtualNetworkGatewayConnectionsClient struct { ManagementClient } @@ -48,61 +45,60 @@ func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscr // CreateOrUpdate creates or updates a virtual network gateway connection in // the specified resource group. This method may poll for completion. Polling -// can be canceled by passing the cancel channel argument. The channel will -// be used to cancel polling and any outstanding HTTP requests. +// can be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the name of the virtual network // gateway connection. parameters is parameters supplied to the create or // update virtual network gateway connection operation. 
-func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (result autorest.Response, err error) { +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (<-chan VirtualNetworkGatewayConnection, <-chan error) { + resultChan := make(chan VirtualNetworkGatewayConnection, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, Chain: nil}}}, {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2", Name: validation.Null, Rule: false, - Chain: 
[]validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, Chain: nil}}}, {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat.LocalNetworkAddressSpace", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}, - }}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.ConnectionStatus", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.TunnelConnectionStatus", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: 
"parameters.VirtualNetworkGatewayConnectionPropertiesFormat.EgressBytesTransferred", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.IngressBytesTransferred", Name: validation.ReadOnly, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result VirtualNetworkGatewayConnection + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := 
client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -113,8 +109,9 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(reso "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -137,42 +134,56 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *h // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// Delete deletes the specified virtual network Gateway connection. This -// method may poll for completion. Polling can be canceled by passing the -// cancel channel argument. The channel will be used to cancel polling and -// any outstanding HTTP requests. +// Delete deletes the specified virtual network Gateway connection. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the name of the virtual network // gateway connection. 
-func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -183,8 +194,9 @@ func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGrou "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -223,13 +235,15 @@ func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http. func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -248,8 +262,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupNa "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": 
APIVersion, } preparer := autorest.CreatePreparer( @@ -280,8 +295,8 @@ func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Res } // GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation -// retrieves information about the specified virtual network gateway -// connection shared key through Network resource provider. +// retrieves information about the specified virtual network gateway connection +// shared key through Network resource provider. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the virtual network gateway @@ -289,13 +304,15 @@ func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Res func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKey, err error) { req, err := client.GetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request") + return } resp, err := client.GetSharedKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request") + return } result, err = client.GetSharedKeyResponder(resp) @@ -314,8 +331,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resour "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = 
"2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -352,13 +370,15 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -376,8 +396,9 @@ func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -432,44 +453,60 @@ func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults } // ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation -// resets the virtual network gateway connection shared key for passed -// virtual network gateway connection in the specified resource group through -// Network resource provider. This method may poll for completion. 
Polling -// can be canceled by passing the cancel channel argument. The channel will -// be used to cancel polling and any outstanding HTTP requests. +// resets the virtual network gateway connection shared key for passed virtual +// network gateway connection in the specified resource group through Network +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the virtual network gateway // connection reset shared key Name. parameters is parameters supplied to the -// begin reset virtual network gateway connection shared key operation -// through network resource provider. -func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { +// begin reset virtual network gateway connection shared key operation through +// network resource provider. 
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (<-chan ConnectionResetSharedKey, <-chan error) { + resultChan := make(chan ConnectionResetSharedKey, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.KeyLength", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.KeyLength", Name: validation.InclusiveMaximum, Rule: 128, Chain: nil}, {Target: "parameters.KeyLength", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey") - } - - req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request") - } - - resp, err := client.ResetSharedKeySender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure sending request") - } - - result, err = client.ResetSharedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result ConnectionResetSharedKey + defer func() { + resultChan <- result + errChan <- err + 
close(resultChan) + close(errChan) + }() + req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request") + return + } + + resp, err := client.ResetSharedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure sending request") + return + } + + result, err = client.ResetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ResetSharedKeyPreparer prepares the ResetSharedKey request. @@ -480,8 +517,9 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(reso "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -504,52 +542,69 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *h // ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result ConnectionResetSharedKey, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } -// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation -// sets the virtual network gateway connection shared key for passed virtual -// network gateway connection in the specified resource group through Network -// resource provider. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation sets +// the virtual network gateway connection shared key for passed virtual network +// gateway connection in the specified resource group through Network resource +// provider. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayConnectionName is the virtual network gateway -// connection name. parameters is parameters supplied to the Begin Set -// Virtual Network Gateway connection Shared key operation throughNetwork -// resource provider. -func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { +// connection name. 
parameters is parameters supplied to the Begin Set Virtual +// Network Gateway connection Shared key operation throughNetwork resource +// provider. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (<-chan ConnectionSharedKey, <-chan error) { + resultChan := make(chan ConnectionSharedKey, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey") - } - - req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request") - } - - resp, err := client.SetSharedKeySender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure sending request") - } - - result, err = client.SetSharedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result ConnectionSharedKey + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.SetSharedKeyPreparer(resourceGroupName, 
virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request") + return + } + + resp, err := client.SetSharedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure sending request") + return + } + + result, err = client.SetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // SetSharedKeyPreparer prepares the SetSharedKey request. @@ -560,8 +615,9 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resour "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -584,12 +640,13 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *htt // SetSharedKeyResponder handles the response to the SetSharedKey request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go old mode 100644 new mode 100755 index 2a6ce4772dfb..720978e83ff8 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,11 +25,7 @@ import ( "net/http" ) -// VirtualNetworkGatewaysClient is the the Microsoft Azure Network management -// API provides a RESTful set of web services that interact with Microsoft -// Azure Networks service to manage your network resources. The API has -// entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. 
+// VirtualNetworkGatewaysClient is the composite Swagger for Network Client type VirtualNetworkGatewaysClient struct { ManagementClient } @@ -46,42 +42,55 @@ func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID s return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } -// CreateOrUpdate creates or updates a virtual network gateway in the -// specified resource group. This method may poll for completion. Polling can -// be canceled by passing the cancel channel argument. The channel will be -// used to cancel polling and any outstanding HTTP requests. +// CreateOrUpdate creates or updates a virtual network gateway in the specified +// resource group. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. // parameters is parameters supplied to create or update virtual network // gateway operation. 
-func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (<-chan VirtualNetworkGateway, <-chan error) { + resultChan := make(chan VirtualNetworkGateway, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate") - } - - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return + Constraints: []validation.Constraint{{Target: 
"parameters.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result VirtualNetworkGateway + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -92,8 +101,9 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupN "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -116,13 +126,14 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Reques // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -133,24 +144,37 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Re // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. -func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + 
close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -161,8 +185,9 @@ func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName stri "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -203,13 +228,15 @@ func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, err error) { req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + return } resp, err := client.GeneratevpnclientpackageSender(req) if err != nil { result.Response = autorest.Response{Response: resp} 
- return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") + return } result, err = client.GeneratevpnclientpackageResponder(resp) @@ -228,8 +255,9 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(reso "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -268,13 +296,15 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(res func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -293,8 +323,9 @@ func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } + const APIVersion = "2017-03-01" queryParameters := 
map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -324,19 +355,275 @@ func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (re return } +// GetAdvertisedRoutes this operation retrieves a list of routes the virtual +// network gateway is advertising to the specified peer. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. peer +// is the IP address of the peer +func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutes(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (<-chan GatewayRouteListResult, <-chan error) { + resultChan := make(chan GatewayRouteListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result GatewayRouteListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetAdvertisedRoutesPreparer(resourceGroupName, virtualNetworkGatewayName, peer, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetAdvertisedRoutes", nil, "Failure preparing request") + return + } + + resp, err := client.GetAdvertisedRoutesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetAdvertisedRoutes", resp, "Failure sending request") + return + } + + result, err = client.GetAdvertisedRoutesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetAdvertisedRoutes", resp, "Failure responding to request") + } + }() + return 
resultChan, errChan +} + +// GetAdvertisedRoutesPreparer prepares the GetAdvertisedRoutes request. +func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesPreparer(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + "peer": autorest.Encode("query", peer), + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetAdvertisedRoutesSender sends the GetAdvertisedRoutes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetAdvertisedRoutesResponder handles the response to the GetAdvertisedRoutes request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesResponder(resp *http.Response) (result GatewayRouteListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetBgpPeerStatus the GetBgpPeerStatus operation retrieves the status of all +// BGP peers. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. peer +// is the IP address of the peer to retrieve the status of. +func (client VirtualNetworkGatewaysClient) GetBgpPeerStatus(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (<-chan BgpPeerStatusListResult, <-chan error) { + resultChan := make(chan BgpPeerStatusListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result BgpPeerStatusListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetBgpPeerStatusPreparer(resourceGroupName, virtualNetworkGatewayName, peer, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetBgpPeerStatus", nil, "Failure preparing request") + return + } + + resp, err := client.GetBgpPeerStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetBgpPeerStatus", resp, "Failure sending request") + return + } + + result, err = client.GetBgpPeerStatusResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetBgpPeerStatus", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetBgpPeerStatusPreparer prepares the GetBgpPeerStatus request. +func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusPreparer(resourceGroupName string, virtualNetworkGatewayName string, peer string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(peer) > 0 { + queryParameters["peer"] = autorest.Encode("query", peer) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetBgpPeerStatusSender sends the GetBgpPeerStatus request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetBgpPeerStatusResponder handles the response to the GetBgpPeerStatus request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusResponder(resp *http.Response) (result BgpPeerStatusListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLearnedRoutes this operation retrieves a list of routes the virtual +// network gateway has learned, including routes learned from BGP peers. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +func (client VirtualNetworkGatewaysClient) GetLearnedRoutes(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (<-chan GatewayRouteListResult, <-chan error) { + resultChan := make(chan GatewayRouteListResult, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result GatewayRouteListResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetLearnedRoutesPreparer(resourceGroupName, virtualNetworkGatewayName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetLearnedRoutes", nil, "Failure preparing request") + return + } + + resp, err := client.GetLearnedRoutesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "GetLearnedRoutes", resp, "Failure sending request") + return + } + + result, err = client.GetLearnedRoutesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"network.VirtualNetworkGatewaysClient", "GetLearnedRoutes", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetLearnedRoutesPreparer prepares the GetLearnedRoutes request. +func (client VirtualNetworkGatewaysClient) GetLearnedRoutesPreparer(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetLearnedRoutesSender sends the GetLearnedRoutes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetLearnedRoutesResponder handles the response to the GetLearnedRoutes request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) GetLearnedRoutesResponder(resp *http.Response) (result GatewayRouteListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // List gets all virtual network gateways by resource group. // // resourceGroupName is the name of the resource group. func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -354,8 +641,9 @@ func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -410,32 +698,45 @@ func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNe } // Reset resets the primary of the virtual network gateway in the specified -// resource group. This method may poll for completion. 
Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// resource group. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. // virtualNetworkGatewayName is the name of the virtual network gateway. // gatewayVip is virtual network gateway vip address supplied to the begin // reset of the active-active feature enabled gateway. -func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, gatewayVip string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, gatewayVip, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request") - } - - resp, err := client.ResetSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure sending request") - } - - result, err = client.ResetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure responding to request") - } - - return +func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, gatewayVip string, cancel <-chan struct{}) (<-chan VirtualNetworkGateway, <-chan error) { + resultChan := make(chan VirtualNetworkGateway, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualNetworkGateway + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.ResetPreparer(resourceGroupName, 
virtualNetworkGatewayName, gatewayVip, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request") + return + } + + resp, err := client.ResetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure sending request") + return + } + + result, err = client.ResetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // ResetPreparer prepares the Reset request. @@ -446,8 +747,9 @@ func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName strin "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(gatewayVip) > 0 { queryParameters["gatewayVip"] = autorest.Encode("query", gatewayVip) @@ -471,12 +773,13 @@ func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http // ResetResponder handles the response to the Reset request. The method always // closes the http.Response Body. 
-func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go old mode 100644 new mode 100755 index bfed897a3bf9..27fafdfd0a46 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,7 @@ import ( "net/http" ) -// VirtualNetworkPeeringsClient is the the Microsoft Azure Network management -// API provides a RESTful set of web services that interact with Microsoft -// Azure Networks service to manage your network resources. The API has -// entities that capture the relationship between an end user and the -// Microsoft Azure Networks service. +// VirtualNetworkPeeringsClient is the composite Swagger for Network Client type VirtualNetworkPeeringsClient struct { ManagementClient } @@ -54,24 +50,37 @@ func NewVirtualNetworkPeeringsClientWithBaseURI(baseURI string, subscriptionID s // the name of the virtual network. 
virtualNetworkPeeringName is the name of // the peering. virtualNetworkPeeringParameters is parameters supplied to the // create or update virtual network peering operation. -func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, virtualNetworkPeeringParameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (<-chan VirtualNetworkPeering, <-chan error) { + resultChan := make(chan VirtualNetworkPeering, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualNetworkPeering + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, virtualNetworkPeeringParameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", 
"CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -83,8 +92,9 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupN "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -107,13 +117,14 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Reques // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkPeering, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } @@ -125,24 +136,37 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Re // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. 
virtualNetworkPeeringName is the name of // the virtual network peering. -func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. @@ -154,8 +178,9 @@ func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName stri "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -194,13 +219,15 @@ func (client VirtualNetworkPeeringsClient) DeleteResponder(resp *http.Response) func (client VirtualNetworkPeeringsClient) Get(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (result VirtualNetworkPeering, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -220,8 +247,9 @@ func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ 
-258,13 +286,15 @@ func (client VirtualNetworkPeeringsClient) GetResponder(resp *http.Response) (re func (client VirtualNetworkPeeringsClient) List(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkPeeringListResult, err error) { req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -283,8 +313,9 @@ func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go old mode 100644 new mode 100755 index 9046437af2df..eab048c7252e --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -14,7 +14,7 @@ package network // See the License for the specific language governing permissions and // limitations under the License. 
// -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,11 +24,7 @@ import ( "net/http" ) -// VirtualNetworksClient is the the Microsoft Azure Network management API -// provides a RESTful set of web services that interact with Microsoft Azure -// Networks service to manage your network resources. The API has entities -// that capture the relationship between an end user and the Microsoft Azure -// Networks service. +// VirtualNetworksClient is the composite Swagger for Network Client type VirtualNetworksClient struct { ManagementClient } @@ -49,18 +45,20 @@ func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) // for use. // // resourceGroupName is the name of the resource group. virtualNetworkName is -// the name of the virtual network. ipAddress is the private IP address to be +// the name of the virtual network. IPAddress is the private IP address to be // verified. 
-func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, ipAddress string) (result IPAddressAvailabilityResult, err error) { - req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, ipAddress) +func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, IPAddress string) (result IPAddressAvailabilityResult, err error) { + req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, IPAddress) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request") + return } resp, err := client.CheckIPAddressAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request") + return } result, err = client.CheckIPAddressAvailabilityResponder(resp) @@ -72,18 +70,19 @@ func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName } // CheckIPAddressAvailabilityPreparer prepares the CheckIPAddressAvailability request. 
-func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, ipAddress string) (*http.Request, error) { +func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, IPAddress string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } - if len(ipAddress) > 0 { - queryParameters["ipAddress"] = autorest.Encode("query", ipAddress) + if len(IPAddress) > 0 { + queryParameters["ipAddress"] = autorest.Encode("query", IPAddress) } preparer := autorest.CreatePreparer( @@ -114,31 +113,44 @@ func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *ht } // CreateOrUpdate creates or updates a virtual network in the specified -// resource group. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// resource group. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. 
parameters is parameters supplied to the // create or update virtual network operation -func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request") - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request") - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request") - } - - return +func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (<-chan VirtualNetwork, <-chan error) { + resultChan := make(chan VirtualNetwork, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result VirtualNetwork + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if 
err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. @@ -149,8 +161,9 @@ func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName str "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -173,41 +186,55 @@ func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*ht // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. -func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetwork, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified virtual network. This method may poll for -// completion. Polling can be canceled by passing the cancel channel -// argument. The channel will be used to cancel polling and any outstanding -// HTTP requests. +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. // // resourceGroupName is the name of the resource group. virtualNetworkName is // the name of the virtual network. 
-func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (result autorest.Response, err error) { - req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request") - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request") - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request") - } - - return +func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // DeletePreparer prepares the Delete request. 
@@ -218,8 +245,9 @@ func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, vir "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -257,13 +285,15 @@ func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) { req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand) if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request") + return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request") + return } result, err = client.GetResponder(resp) @@ -282,8 +312,9 @@ func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtua "virtualNetworkName": autorest.Encode("path", virtualNetworkName), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } if len(expand) > 0 { queryParameters["$expand"] = autorest.Encode("query", expand) @@ -322,13 +353,15 @@ func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result Vi func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, err error) { req, err := client.ListPreparer(resourceGroupName) if 
err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -346,8 +379,9 @@ func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*htt "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -405,13 +439,15 @@ func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkLi func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) { req, err := client.ListAllPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request") + return } resp, err := client.ListAllSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request") + return } result, err = client.ListAllResponder(resp) @@ -428,8 +464,9 @@ func (client VirtualNetworksClient) ListAllPreparer() 
(*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2017-03-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go new file mode 100755 index 000000000000..28febf847484 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go @@ -0,0 +1,1131 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// WatchersClient is the composite Swagger for Network Client +type WatchersClient struct { + ManagementClient +} + +// NewWatchersClient creates an instance of the WatchersClient client. +func NewWatchersClient(subscriptionID string) WatchersClient { + return NewWatchersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWatchersClientWithBaseURI creates an instance of the WatchersClient +// client. 
+func NewWatchersClientWithBaseURI(baseURI string, subscriptionID string) WatchersClient { + return WatchersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a network watcher in the specified +// resource group. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. parameters is parameters that define the +// network watcher resource. +func (client WatchersClient) CreateOrUpdate(resourceGroupName string, networkWatcherName string, parameters Watcher) (result Watcher, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkWatcherName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client WatchersClient) CreateOrUpdatePreparer(resourceGroupName string, networkWatcherName string, parameters Watcher) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client WatchersClient) CreateOrUpdateResponder(resp *http.Response) (result Watcher, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified network watcher resource. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
+// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. +func (client WatchersClient) Delete(resourceGroupName string, networkWatcherName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) { + resultChan := make(chan autorest.Response, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result autorest.Response + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, networkWatcherName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
+func (client WatchersClient) DeletePreparer(resourceGroupName string, networkWatcherName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client WatchersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified network watcher by resource group. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. 
+func (client WatchersClient) Get(resourceGroupName string, networkWatcherName string) (result Watcher, err error) { + req, err := client.GetPreparer(resourceGroupName, networkWatcherName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WatchersClient) GetPreparer(resourceGroupName string, networkWatcherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. 
The method always +// closes the http.Response Body. +func (client WatchersClient) GetResponder(resp *http.Response) (result Watcher, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetFlowLogStatus queries status of flow log on a specified resource. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the network watcher resource group. +// networkWatcherName is the name of the network watcher resource. parameters +// is parameters that define a resource to query flow log status. +func (client WatchersClient) GetFlowLogStatus(resourceGroupName string, networkWatcherName string, parameters FlowLogStatusParameters, cancel <-chan struct{}) (<-chan FlowLogInformation, <-chan error) { + resultChan := make(chan FlowLogInformation, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetFlowLogStatus") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result FlowLogInformation + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetFlowLogStatusPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetFlowLogStatus", nil, "Failure preparing request") + return + } + + resp, err 
:= client.GetFlowLogStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetFlowLogStatus", resp, "Failure sending request") + return + } + + result, err = client.GetFlowLogStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetFlowLogStatus", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetFlowLogStatusPreparer prepares the GetFlowLogStatus request. +func (client WatchersClient) GetFlowLogStatusPreparer(resourceGroupName string, networkWatcherName string, parameters FlowLogStatusParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetFlowLogStatusSender sends the GetFlowLogStatus request. The method will close the +// http.Response Body if it receives an error. 
+func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetFlowLogStatusResponder handles the response to the GetFlowLogStatus request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetFlowLogStatusResponder(resp *http.Response) (result FlowLogInformation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetNextHop gets the next hop from the specified VM. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. parameters is parameters that define the +// source and destination endpoint. 
+func (client WatchersClient) GetNextHop(resourceGroupName string, networkWatcherName string, parameters NextHopParameters, cancel <-chan struct{}) (<-chan NextHopResult, <-chan error) { + resultChan := make(chan NextHopResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.SourceIPAddress", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.DestinationIPAddress", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetNextHop") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result NextHopResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetNextHopPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNextHop", nil, "Failure preparing request") + return + } + + resp, err := client.GetNextHopSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNextHop", resp, "Failure sending request") + return + } + + result, err = client.GetNextHopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetNextHop", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetNextHopPreparer prepares the GetNextHop request. 
+func (client WatchersClient) GetNextHopPreparer(resourceGroupName string, networkWatcherName string, parameters NextHopParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetNextHopSender sends the GetNextHop request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) GetNextHopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetNextHopResponder handles the response to the GetNextHop request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetNextHopResponder(resp *http.Response) (result NextHopResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetTopology gets the current network topology by resource group. +// +// resourceGroupName is the name of the resource group. 
networkWatcherName is +// the name of the network watcher. parameters is parameters that define the +// representation of topology. +func (client WatchersClient) GetTopology(resourceGroupName string, networkWatcherName string, parameters TopologyParameters) (result Topology, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceGroupName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetTopology") + } + + req, err := client.GetTopologyPreparer(resourceGroupName, networkWatcherName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTopology", nil, "Failure preparing request") + return + } + + resp, err := client.GetTopologySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTopology", resp, "Failure sending request") + return + } + + result, err = client.GetTopologyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTopology", resp, "Failure responding to request") + } + + return +} + +// GetTopologyPreparer prepares the GetTopology request. 
+func (client WatchersClient) GetTopologyPreparer(resourceGroupName string, networkWatcherName string, parameters TopologyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetTopologySender sends the GetTopology request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) GetTopologySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetTopologyResponder handles the response to the GetTopology request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetTopologyResponder(resp *http.Response) (result Topology, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetTroubleshooting initiate troubleshooting on a specified resource This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. 
+// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher resource. parameters is parameters that +// define the resource to troubleshoot. +func (client WatchersClient) GetTroubleshooting(resourceGroupName string, networkWatcherName string, parameters TroubleshootingParameters, cancel <-chan struct{}) (<-chan TroubleshootingResult, <-chan error) { + resultChan := make(chan TroubleshootingResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.TroubleshootingProperties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.TroubleshootingProperties.StorageID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.TroubleshootingProperties.StoragePath", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetTroubleshooting") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result TroubleshootingResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetTroubleshootingPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshooting", nil, "Failure preparing request") + return + } + + resp, err := client.GetTroubleshootingSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshooting", resp, "Failure sending request") + return + } + + result, err = client.GetTroubleshootingResponder(resp) + if err 
!= nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshooting", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetTroubleshootingPreparer prepares the GetTroubleshooting request. +func (client WatchersClient) GetTroubleshootingPreparer(resourceGroupName string, networkWatcherName string, parameters TroubleshootingParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetTroubleshootingSender sends the GetTroubleshooting request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetTroubleshootingResponder handles the response to the GetTroubleshooting request. The method always +// closes the http.Response Body. 
+func (client WatchersClient) GetTroubleshootingResponder(resp *http.Response) (result TroubleshootingResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetTroubleshootingResult get the last completed troubleshooting result on a +// specified resource This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher resource. parameters is parameters that +// define the resource to query the troubleshooting result. +func (client WatchersClient) GetTroubleshootingResult(resourceGroupName string, networkWatcherName string, parameters QueryTroubleshootingParameters, cancel <-chan struct{}) (<-chan TroubleshootingResult, <-chan error) { + resultChan := make(chan TroubleshootingResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetTroubleshootingResult") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result TroubleshootingResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetTroubleshootingResultPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", 
"GetTroubleshootingResult", nil, "Failure preparing request") + return + } + + resp, err := client.GetTroubleshootingResultSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshootingResult", resp, "Failure sending request") + return + } + + result, err = client.GetTroubleshootingResultResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetTroubleshootingResult", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetTroubleshootingResultPreparer prepares the GetTroubleshootingResult request. +func (client WatchersClient) GetTroubleshootingResultPreparer(resourceGroupName string, networkWatcherName string, parameters QueryTroubleshootingParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetTroubleshootingResultSender sends the GetTroubleshootingResult request. The method will close the +// http.Response Body if it receives an error. 
+func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetTroubleshootingResultResponder handles the response to the GetTroubleshootingResult request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetTroubleshootingResultResponder(resp *http.Response) (result TroubleshootingResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetVMSecurityRules gets the configured and effective security group rules on +// the specified VM. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. parameters is parameters that define the VM +// to check security groups for. 
+func (client WatchersClient) GetVMSecurityRules(resourceGroupName string, networkWatcherName string, parameters SecurityGroupViewParameters, cancel <-chan struct{}) (<-chan SecurityGroupViewResult, <-chan error) { + resultChan := make(chan SecurityGroupViewResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "GetVMSecurityRules") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result SecurityGroupViewResult + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GetVMSecurityRulesPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetVMSecurityRules", nil, "Failure preparing request") + return + } + + resp, err := client.GetVMSecurityRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetVMSecurityRules", resp, "Failure sending request") + return + } + + result, err = client.GetVMSecurityRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "GetVMSecurityRules", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GetVMSecurityRulesPreparer prepares the GetVMSecurityRules request. 
+func (client WatchersClient) GetVMSecurityRulesPreparer(resourceGroupName string, networkWatcherName string, parameters SecurityGroupViewParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetVMSecurityRulesSender sends the GetVMSecurityRules request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetVMSecurityRulesResponder handles the response to the GetVMSecurityRules request. The method always +// closes the http.Response Body. +func (client WatchersClient) GetVMSecurityRulesResponder(resp *http.Response) (result SecurityGroupViewResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all network watchers by resource group. 
+// +// resourceGroupName is the name of the resource group. +func (client WatchersClient) List(resourceGroupName string) (result WatcherListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WatchersClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client WatchersClient) ListResponder(resp *http.Response) (result WatcherListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAll gets all network watchers by subscription. +func (client WatchersClient) ListAll() (result WatcherListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAll", resp, "Failure sending request") + return + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client WatchersClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client WatchersClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client WatchersClient) ListAllResponder(resp *http.Response) (result WatcherListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetFlowLogConfiguration configures flow log on a specified resource. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the network watcher resource group. +// networkWatcherName is the name of the network watcher resource. parameters +// is parameters that define the configuration of flow log. 
+func (client WatchersClient) SetFlowLogConfiguration(resourceGroupName string, networkWatcherName string, parameters FlowLogInformation, cancel <-chan struct{}) (<-chan FlowLogInformation, <-chan error) { + resultChan := make(chan FlowLogInformation, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.FlowLogProperties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.FlowLogProperties.StorageID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.FlowLogProperties.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "SetFlowLogConfiguration") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result FlowLogInformation + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.SetFlowLogConfigurationPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "SetFlowLogConfiguration", nil, "Failure preparing request") + return + } + + resp, err := client.SetFlowLogConfigurationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "SetFlowLogConfiguration", resp, "Failure sending request") + return + } + + result, err = client.SetFlowLogConfigurationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "SetFlowLogConfiguration", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// 
SetFlowLogConfigurationPreparer prepares the SetFlowLogConfiguration request. +func (client WatchersClient) SetFlowLogConfigurationPreparer(resourceGroupName string, networkWatcherName string, parameters FlowLogInformation, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// SetFlowLogConfigurationSender sends the SetFlowLogConfiguration request. The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// SetFlowLogConfigurationResponder handles the response to the SetFlowLogConfiguration request. The method always +// closes the http.Response Body. 
+func (client WatchersClient) SetFlowLogConfigurationResponder(resp *http.Response) (result FlowLogInformation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// VerifyIPFlow verify IP flow from the specified VM to a location given the +// currently configured NSG rules. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkWatcherName is +// the name of the network watcher. parameters is parameters that define the IP +// flow to be verified. +func (client WatchersClient) VerifyIPFlow(resourceGroupName string, networkWatcherName string, parameters VerificationIPFlowParameters, cancel <-chan struct{}) (<-chan VerificationIPFlowResult, <-chan error) { + resultChan := make(chan VerificationIPFlowResult, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.TargetResourceID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LocalPort", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.RemotePort", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LocalIPAddress", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.RemoteIPAddress", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "network.WatchersClient", "VerifyIPFlow") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result VerificationIPFlowResult + defer 
func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.VerifyIPFlowPreparer(resourceGroupName, networkWatcherName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "VerifyIPFlow", nil, "Failure preparing request") + return + } + + resp, err := client.VerifyIPFlowSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.WatchersClient", "VerifyIPFlow", resp, "Failure sending request") + return + } + + result, err = client.VerifyIPFlowResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.WatchersClient", "VerifyIPFlow", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// VerifyIPFlowPreparer prepares the VerifyIPFlow request. +func (client WatchersClient) VerifyIPFlowPreparer(resourceGroupName string, networkWatcherName string, parameters VerificationIPFlowParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkWatcherName": autorest.Encode("path", networkWatcherName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// VerifyIPFlowSender sends the VerifyIPFlow request. 
The method will close the +// http.Response Body if it receives an error. +func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// VerifyIPFlowResponder handles the response to the VerifyIPFlow request. The method always +// closes the http.Response Body. +func (client WatchersClient) VerifyIPFlowResponder(resp *http.Response) (result VerificationIPFlowResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go old mode 100644 new mode 100755 index c895c3a949f1..f0606ac133f3 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -14,7 +14,7 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -25,7 +25,7 @@ import ( "net/http" ) -// AccountsClient is the the Storage Management Client. +// AccountsClient is the the Azure Storage Management API. type AccountsClient struct { ManagementClient } @@ -44,9 +44,9 @@ func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) Account // CheckNameAvailability checks that the storage account name is valid and is // not already in use. 
// -// accountName is the name of the storage account within the specified -// resource group. Storage account names must be between 3 and 24 characters -// in length and use numbers and lower-case letters only. +// accountName is the name of the storage account within the specified resource +// group. Storage account names must be between 3 and 24 characters in length +// and use numbers and lower-case letters only. func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: accountName, @@ -57,13 +57,15 @@ func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameA req, err := client.CheckNameAvailabilityPreparer(accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") + return } resp, err := client.CheckNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") + return } result, err = client.CheckNameAvailabilityResponder(resp) @@ -80,8 +82,9 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCh "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -114,59 +117,74 @@ func (client AccountsClient) CheckNameAvailabilityResponder(resp 
*http.Response) } // Create asynchronously creates a new storage account with the specified -// parameters. If an account is already created and a subsequent create -// request is issued with different properties, the account properties will -// be updated. If an account is already created and a subsequent create or -// update request is issued with the exact same set of properties, the -// request will succeed. This method may poll for completion. Polling can be -// canceled by passing the cancel channel argument. The channel will be used -// to cancel polling and any outstanding HTTP requests. +// parameters. If an account is already created and a subsequent create request +// is issued with different properties, the account properties will be updated. +// If an account is already created and a subsequent create or update request +// is issued with the exact same set of properties, the request will succeed. +// This method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. -// parameters is the parameters to provide for the created account. -func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. parameters is the parameters to provide for the created +// account. 
+func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (<-chan Account, <-chan error) { + resultChan := make(chan Account, 1) + errChan := make(chan error, 1) if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.Sku.Tier", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, {Target: "parameters.AccountPropertiesCreateParameters.Encryption", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.Services", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"parameters.AccountPropertiesCreateParameters.Encryption.Services.Blob", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.Services.Blob.LastEnabledTime", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, - }}, - {Target: "parameters.AccountPropertiesCreateParameters.Encryption.KeySource", Name: validation.Null, Rule: true, Chain: nil}, - }}, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.KeySource", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { - return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") - } - - req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) - if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") - } - - resp, err := client.CreateSender(req) - if err != nil { - result.Response = resp - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") - } - - result, err = client.CreateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") - } - - return + errChan <- validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Account + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") + return + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = 
autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") + return + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") + } + }() + return resultChan, errChan } // CreatePreparer prepares the Create request. @@ -177,8 +195,9 @@ func (client AccountsClient) CreatePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -201,24 +220,30 @@ func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, er // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. -func (client AccountsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) - result.Response = resp + result.Response = autorest.Response{Response: resp} return } // Delete deletes a storage account in Microsoft Azure. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. 
Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { @@ -227,13 +252,15 @@ func (client AccountsClient) Delete(resourceGroupName string, accountName string req, err := client.DeletePreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + return } result, err = client.DeleteResponder(resp) @@ -252,8 +279,9 @@ func (client AccountsClient) DeletePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ 
-287,11 +315,16 @@ func (client AccountsClient) DeleteResponder(resp *http.Response) (result autore // The ListKeys operation should be used to retrieve storage keys. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { @@ -300,13 +333,15 @@ func (client AccountsClient) GetProperties(resourceGroupName string, accountName req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + return } resp, err := client.GetPropertiesSender(req) if err != nil { result.Response = 
autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + return } result, err = client.GetPropertiesResponder(resp) @@ -325,8 +360,9 @@ func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, acc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -361,13 +397,15 @@ func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result func (client AccountsClient) List() (result AccountListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) @@ -384,8 +422,9 @@ func (client AccountsClient) ListPreparer() (*http.Request, error) { "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -415,22 +454,117 @@ func (client AccountsClient) ListResponder(resp *http.Response) (result AccountL return } -// 
ListByResourceGroup lists all the storage accounts available under the -// given resource group. Note that storage keys are not returned; use the -// ListKeys operation for this. +// ListAccountSAS list SAS credentials of a storage account. // // resourceGroupName is the name of the resource group within the user's -// subscription. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. parameters is the parameters to provide to list SAS +// credentials for the storage account. +func (client AccountsClient) ListAccountSAS(resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListAccountSAS") + } + + req, err := client.ListAccountSASPreparer(resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request") + return + } + + 
resp, err := client.ListAccountSASSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request") + return + } + + result, err = client.ListAccountSASResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request") + } + + return +} + +// ListAccountSASPreparer prepares the ListAccountSAS request. +func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAccountSASSender sends the ListAccountSAS request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the given +// resource group. Note that storage keys are not returned; use the ListKeys +// operation for this. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. The name is case insensitive. func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListByResourceGroup") + } + req, err := client.ListByResourceGroupPreparer(resourceGroupName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + return } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", 
resp, "Failure sending request") + return } result, err = client.ListByResourceGroupResponder(resp) @@ -448,8 +582,9 @@ func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName strin "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -482,11 +617,16 @@ func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) ( // ListKeys lists the access keys for the specified storage account. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. 
func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { @@ -495,13 +635,15 @@ func (client AccountsClient) ListKeys(resourceGroupName string, accountName stri req, err := client.ListKeysPreparer(resourceGroupName, accountName) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + return } resp, err := client.ListKeysSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + return } result, err = client.ListKeysResponder(resp) @@ -520,8 +662,9 @@ func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -551,17 +694,108 @@ func (client AccountsClient) ListKeysResponder(resp 
*http.Response) (result Acco return } +// ListServiceSAS list service SAS credentials of a specific resource. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. parameters is the parameters to provide to list service SAS +// credentials. +func (client AccountsClient) ListServiceSAS(resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Identifier", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListServiceSAS") + } + + req, err := client.ListServiceSASPreparer(resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing 
request") + return + } + + resp, err := client.ListServiceSASSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request") + return + } + + result, err = client.ListServiceSASResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request") + } + + return +} + +// ListServiceSASPreparer prepares the ListServiceSAS request. +func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListServiceSASSender sends the ListServiceSAS request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + // RegenerateKey regenerates one of the access keys for the specified storage // account. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. -// regenerateKey is specifies name of the key which should be regenerated -- -// key1 or key2. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. regenerateKey is specifies name of the key which should be +// regenerated -- key1 or key2. 
func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, @@ -572,13 +806,15 @@ func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + return } resp, err := client.RegenerateKeySender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + return } result, err = client.RegenerateKeyResponder(resp) @@ -597,8 +833,9 @@ func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, acc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := 
autorest.CreatePreparer( @@ -632,22 +869,27 @@ func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result // Update the update operation can be used to update the SKU, encryption, // access tier, or tags for a storage account. It can also be used to map the -// account to a custom domain. Only one custom domain is supported per -// storage account; the replacement/change of custom domain is not supported. -// In order to replace an old custom domain, the old value must be -// cleared/unregistered before a new value can be set. The update of multiple -// properties is supported. This call does not change the storage keys for -// the account. If you want to change the storage account keys, use the -// regenerate keys operation. The location and name of the storage account -// cannot be changed after creation. +// account to a custom domain. Only one custom domain is supported per storage +// account; the replacement/change of custom domain is not supported. In order +// to replace an old custom domain, the old value must be cleared/unregistered +// before a new value can be set. The update of multiple properties is +// supported. This call does not change the storage keys for the account. If +// you want to change the storage account keys, use the regenerate keys +// operation. The location and name of the storage account cannot be changed +// after creation. // // resourceGroupName is the name of the resource group within the user's -// subscription. accountName is the name of the storage account within the -// specified resource group. Storage account names must be between 3 and 24 -// characters in length and use numbers and lower-case letters only. -// parameters is the parameters to provide for the updated account. +// subscription. The name is case insensitive. accountName is the name of the +// storage account within the specified resource group. 
Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case +// letters only. parameters is the parameters to provide for the updated +// account. func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { @@ -656,13 +898,15 @@ func (client AccountsClient) Update(resourceGroupName string, accountName string req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) if err != nil { - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + return } result, err = client.UpdateResponder(resp) @@ -681,8 +925,9 @@ func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountNam "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" 
queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go old mode 100644 new mode 100755 index 68708dbf243e..a537bdd25ec0 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -1,7 +1,7 @@ // Package storage implements the Azure ARM Storage service API version -// 2016-01-01. +// 2016-12-01. // -// The Storage Management Client. +// The Azure Storage Management API. package storage // Copyright (c) Microsoft and contributors. All rights reserved. @@ -18,7 +18,7 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
@@ -27,9 +27,6 @@ import ( ) const ( - // APIVersion is the version of the Storage - APIVersion = "2016-01-01" - // DefaultBaseURI is the default URI used for the service Storage DefaultBaseURI = "https://management.azure.com" ) @@ -38,7 +35,6 @@ const ( type ManagementClient struct { autorest.Client BaseURI string - APIVersion string SubscriptionID string } @@ -52,7 +48,6 @@ func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), BaseURI: baseURI, - APIVersion: APIVersion, SubscriptionID: subscriptionID, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go old mode 100644 new mode 100755 index bff65ec6eb5c..2e203018428e --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go @@ -14,7 +14,7 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -38,19 +38,29 @@ type AccountStatus string const ( // Available specifies the available state for account status. - Available AccountStatus = "Available" + Available AccountStatus = "available" // Unavailable specifies the unavailable state for account status. - Unavailable AccountStatus = "Unavailable" + Unavailable AccountStatus = "unavailable" +) + +// HTTPProtocol enumerates the values for http protocol. +type HTTPProtocol string + +const ( + // HTTPS specifies the https state for http protocol. + HTTPS HTTPProtocol = "https" + // Httpshttp specifies the httpshttp state for http protocol. 
+ Httpshttp HTTPProtocol = "https,http" ) // KeyPermission enumerates the values for key permission. type KeyPermission string const ( - // FULL specifies the full state for key permission. - FULL KeyPermission = "FULL" - // READ specifies the read state for key permission. - READ KeyPermission = "READ" + // Full specifies the full state for key permission. + Full KeyPermission = "Full" + // Read specifies the read state for key permission. + Read KeyPermission = "Read" ) // Kind enumerates the values for kind. @@ -63,6 +73,50 @@ const ( Storage Kind = "Storage" ) +// Permissions enumerates the values for permissions. +type Permissions string + +const ( + // A specifies the a state for permissions. + A Permissions = "a" + // C specifies the c state for permissions. + C Permissions = "c" + // D specifies the d state for permissions. + D Permissions = "d" + // L specifies the l state for permissions. + L Permissions = "l" + // P specifies the p state for permissions. + P Permissions = "p" + // R specifies the r state for permissions. + R Permissions = "r" + // U specifies the u state for permissions. + U Permissions = "u" + // W specifies the w state for permissions. + W Permissions = "w" +) + +// Permissions1 enumerates the values for permissions 1. +type Permissions1 string + +const ( + // Permissions1A specifies the permissions 1a state for permissions 1. + Permissions1A Permissions1 = "a" + // Permissions1C specifies the permissions 1c state for permissions 1. + Permissions1C Permissions1 = "c" + // Permissions1D specifies the permissions 1d state for permissions 1. + Permissions1D Permissions1 = "d" + // Permissions1L specifies the permissions 1l state for permissions 1. + Permissions1L Permissions1 = "l" + // Permissions1P specifies the permissions 1p state for permissions 1. + Permissions1P Permissions1 = "p" + // Permissions1R specifies the permissions 1r state for permissions 1. 
+ Permissions1R Permissions1 = "r" + // Permissions1U specifies the permissions 1u state for permissions 1. + Permissions1U Permissions1 = "u" + // Permissions1W specifies the permissions 1w state for permissions 1. + Permissions1W Permissions1 = "w" +) + // ProvisioningState enumerates the values for provisioning state. type ProvisioningState string @@ -85,6 +139,46 @@ const ( AlreadyExists Reason = "AlreadyExists" ) +// ResourceEnum enumerates the values for resource enum. +type ResourceEnum string + +const ( + // ResourceEnumB specifies the resource enum b state for resource enum. + ResourceEnumB ResourceEnum = "b" + // ResourceEnumC specifies the resource enum c state for resource enum. + ResourceEnumC ResourceEnum = "c" + // ResourceEnumF specifies the resource enum f state for resource enum. + ResourceEnumF ResourceEnum = "f" + // ResourceEnumS specifies the resource enum s state for resource enum. + ResourceEnumS ResourceEnum = "s" +) + +// ResourceTypes enumerates the values for resource types. +type ResourceTypes string + +const ( + // ResourceTypesC specifies the resource types c state for resource types. + ResourceTypesC ResourceTypes = "c" + // ResourceTypesO specifies the resource types o state for resource types. + ResourceTypesO ResourceTypes = "o" + // ResourceTypesS specifies the resource types s state for resource types. + ResourceTypesS ResourceTypes = "s" +) + +// Services enumerates the values for services. +type Services string + +const ( + // B specifies the b state for services. + B Services = "b" + // F specifies the f state for services. + F Services = "f" + // Q specifies the q state for services. + Q Services = "q" + // T specifies the t state for services. + T Services = "t" +) + // SkuName enumerates the values for sku name. 
type SkuName string @@ -142,7 +236,8 @@ type Account struct { *AccountProperties `json:"properties,omitempty"` } -// AccountCheckNameAvailabilityParameters is +// AccountCheckNameAvailabilityParameters is the parameters used to check the +// availabity of the storage account name. type AccountCheckNameAvailabilityParameters struct { Name *string `json:"name,omitempty"` Type *string `json:"type,omitempty"` @@ -177,43 +272,62 @@ type AccountListResult struct { Value *[]Account `json:"value,omitempty"` } -// AccountProperties is +// AccountProperties is properties of the storage account. type AccountProperties struct { - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` - PrimaryLocation *string `json:"primaryLocation,omitempty"` - StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` - LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` - SecondaryLocation *string `json:"secondaryLocation,omitempty"` - StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` - CreationTime *date.Time `json:"creationTime,omitempty"` - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + PrimaryLocation *string `json:"primaryLocation,omitempty"` + StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` + LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` + SecondaryLocation *string `json:"secondaryLocation,omitempty"` + StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + 
SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } -// AccountPropertiesCreateParameters is +// AccountPropertiesCreateParameters is the parameters used to create the +// storage account. type AccountPropertiesCreateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } -// AccountPropertiesUpdateParameters is +// AccountPropertiesUpdateParameters is the parameters used when updating a +// storage account. type AccountPropertiesUpdateParameters struct { - CustomDomain *CustomDomain `json:"customDomain,omitempty"` - Encryption *Encryption `json:"encryption,omitempty"` - AccessTier AccessTier `json:"accessTier,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` + EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"` } -// AccountRegenerateKeyParameters is +// AccountRegenerateKeyParameters is the parameters used to regenerate the +// storage account key. type AccountRegenerateKeyParameters struct { KeyName *string `json:"keyName,omitempty"` } -// AccountUpdateParameters is the parameters that can be provided when -// updating the storage account properties. +// AccountSasParameters is the parameters to list SAS credentials of a storage +// account. 
+type AccountSasParameters struct { + Services Services `json:"signedServices,omitempty"` + ResourceTypes ResourceTypes `json:"signedResourceTypes,omitempty"` + Permissions Permissions `json:"signedPermission,omitempty"` + IPAddressOrRange *string `json:"signedIp,omitempty"` + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + KeyToSign *string `json:"keyToSign,omitempty"` +} + +// AccountUpdateParameters is the parameters that can be provided when updating +// the storage account properties. type AccountUpdateParameters struct { Sku *Sku `json:"sku,omitempty"` Tags *map[string]*string `json:"tags,omitempty"` @@ -228,8 +342,8 @@ type CheckNameAvailabilityResult struct { Message *string `json:"message,omitempty"` } -// CustomDomain is the custom domain assigned to this storage account. This -// can be set via Update. +// CustomDomain is the custom domain assigned to this storage account. This can +// be set via Update. type CustomDomain struct { Name *string `json:"name,omitempty"` UseSubDomain *bool `json:"useSubDomain,omitempty"` @@ -250,11 +364,14 @@ type EncryptionService struct { // EncryptionServices is a list of services that support encryption. type EncryptionServices struct { - Blob *EncryptionService `json:"blob,omitempty"` + Blob *EncryptionService `json:"blob,omitempty"` + File *EncryptionService `json:"file,omitempty"` + Table *EncryptionService `json:"table,omitempty"` + Queue *EncryptionService `json:"queue,omitempty"` } -// Endpoints is the URIs that are used to perform a retrieval of a public -// blob, queue, or table object. +// Endpoints is the URIs that are used to perform a retrieval of a public blob, +// queue, or table object. 
type Endpoints struct { Blob *string `json:"blob,omitempty"` Queue *string `json:"queue,omitempty"` @@ -262,7 +379,20 @@ type Endpoints struct { File *string `json:"file,omitempty"` } -// Resource is +// ListAccountSasResponse is the List SAS credentials operation response. +type ListAccountSasResponse struct { + autorest.Response `json:"-"` + AccountSasToken *string `json:"accountSasToken,omitempty"` +} + +// ListServiceSasResponse is the List service SAS credentials operation +// response. +type ListServiceSasResponse struct { + autorest.Response `json:"-"` + ServiceSasToken *string `json:"serviceSasToken,omitempty"` +} + +// Resource is describes a storage resource. type Resource struct { ID *string `json:"id,omitempty"` Name *string `json:"name,omitempty"` @@ -271,6 +401,29 @@ type Resource struct { Tags *map[string]*string `json:"tags,omitempty"` } +// ServiceSasParameters is the parameters to list service SAS credentials of a +// speicific resource. +type ServiceSasParameters struct { + CanonicalizedResource *string `json:"canonicalizedResource,omitempty"` + Resource Resource `json:"signedResource,omitempty"` + Permissions Permissions `json:"signedPermission,omitempty"` + IPAddressOrRange *string `json:"signedIp,omitempty"` + Protocols HTTPProtocol `json:"signedProtocol,omitempty"` + SharedAccessStartTime *date.Time `json:"signedStart,omitempty"` + SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"` + Identifier *string `json:"signedIdentifier,omitempty"` + PartitionKeyStart *string `json:"startPk,omitempty"` + PartitionKeyEnd *string `json:"endPk,omitempty"` + RowKeyStart *string `json:"startRk,omitempty"` + RowKeyEnd *string `json:"endRk,omitempty"` + KeyToSign *string `json:"keyToSign,omitempty"` + CacheControl *string `json:"rscc,omitempty"` + ContentDisposition *string `json:"rscd,omitempty"` + ContentEncoding *string `json:"rsce,omitempty"` + ContentLanguage *string `json:"rscl,omitempty"` + ContentType *string `json:"rsct,omitempty"` +} + 
// Sku is the SKU of the storage account. type Sku struct { Name SkuName `json:"name,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go old mode 100644 new mode 100755 similarity index 60% rename from vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go rename to vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go index 866efc9c311d..b12a6d315fda --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go @@ -14,7 +14,7 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. @@ -24,53 +24,54 @@ import ( "net/http" ) -// UsageOperationsClient is the the Storage Management Client. -type UsageOperationsClient struct { +// UsageClient is the the Azure Storage Management API. +type UsageClient struct { ManagementClient } -// NewUsageOperationsClient creates an instance of the UsageOperationsClient -// client. -func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { - return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +// NewUsageClient creates an instance of the UsageClient client. +func NewUsageClient(subscriptionID string) UsageClient { + return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewUsageOperationsClientWithBaseURI creates an instance of the -// UsageOperationsClient client. 
-func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { - return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +// NewUsageClientWithBaseURI creates an instance of the UsageClient client. +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} } // List gets the current usage count and the limit for the resources under the // subscription. -func (client UsageOperationsClient) List() (result UsageListResult, err error) { +func (client UsageClient) List() (result UsageListResult, err error) { req, err := client.ListPreparer() if err != nil { - return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", nil, "Failure preparing request") + err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request") + return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure sending request") + err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request") + return } result, err = client.ListResponder(resp) if err != nil { - err = autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure responding to request") + err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
-func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { +func (client UsageClient) ListPreparer() (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } + const APIVersion = "2016-12-01" queryParameters := map[string]interface{}{ - "api-version": client.APIVersion, + "api-version": APIVersion, } preparer := autorest.CreatePreparer( @@ -83,13 +84,13 @@ func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. -func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { +func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. -func (client UsageOperationsClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { +func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { err = autorest.Respond( resp, client.ByInspecting(), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go old mode 100644 new mode 100755 index e0a181c11a56..ac97c159fa6d --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -14,30 +14,16 @@ package storage // See the License for the specific language governing permissions and // limitations under the License. // -// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 // Changes may cause incorrect behavior and will be lost if the code is // regenerated. 
-import ( - "fmt" -) - -const ( - major = "7" - minor = "0" - patch = "1" - // Always begin a "tag" with a dash (as per http://semver.org) - tag = "-beta" - semVerFormat = "%s.%s.%s%s" - userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" -) - // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "storage", "2016-01-01") + return "Azure-SDK-For-Go/v10.0.2-beta arm-storage/2016-12-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + return "v10.0.2-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 0ab099848bba..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. 
- -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go new file mode 100644 index 000000000000..3292cb556951 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go @@ -0,0 +1,70 @@ +package storage + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "time" +) + +// PutAppendBlob initializes an empty append blob with specified name. An +// append blob must be created using this method before appending blocks. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlockOptions includes the options for an append block operation +type AppendBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + MaxSize *uint `header:"x-ms-blob-condition-maxsize"` + AppendPosition *uint `header:"x-ms-blob-condition-appendpos"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + 
IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AppendBlock appends a block to an append blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block +func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { + params := url.Values{"comp": {"appendblock"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob_test.go new file mode 100644 index 000000000000..23f2a6e08a27 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob_test.go @@ -0,0 +1,126 @@ +package storage + +import ( + "io/ioutil" + + chk "gopkg.in/check.v1" +) + +type AppendBlobSuite struct{} + +var _ = chk.Suite(&AppendBlobSuite{}) + +func (s *AppendBlobSuite) TestPutAppendBlob(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.PutAppendBlob(nil), chk.IsNil) + + // Verify + err := b.GetProperties(nil) + c.Assert(err, chk.IsNil) + c.Assert(b.Properties.ContentLength, chk.Equals, int64(0)) 
+ c.Assert(b.Properties.BlobType, chk.Equals, BlobTypeAppend) +} + +func (s *AppendBlobSuite) TestPutAppendBlobAppendBlocks(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.PutAppendBlob(nil), chk.IsNil) + + chunk1 := content(1024) + chunk2 := content(512) + + // Append first block + c.Assert(b.AppendBlock(chunk1, nil), chk.IsNil) + + // Verify contents + options := GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 0, + End: uint64(len(chunk1) - 1), + }, + } + out, err := b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + + // Append second block + c.Assert(b.AppendBlock(chunk2, nil), chk.IsNil) + + // Verify contents + options.Range.End = uint64(len(chunk1) + len(chunk2) - 1) + out, err = b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) +} + +func (s *StorageBlobSuite) TestPutAppendBlobSpecialChars(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.PutAppendBlob(nil), chk.IsNil) + + // Verify metadata + err := b.GetProperties(nil) + c.Assert(err, chk.IsNil) + c.Assert(b.Properties.ContentLength, chk.Equals, int64(0)) + c.Assert(b.Properties.BlobType, chk.Equals, BlobTypeAppend) + + chunk1 := content(1024) + chunk2 := content(512) + + // Append first block + c.Assert(b.AppendBlock(chunk1, nil), chk.IsNil) + + // Verify contents + options := 
GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 0, + End: uint64(len(chunk1) - 1), + }, + } + out, err := b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + + // Append second block + c.Assert(b.AppendBlock(chunk2, nil), chk.IsNil) + + // Verify contents + options.Range.End = uint64(len(chunk1) + len(chunk2) - 1) + out, err = b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go new file mode 100644 index 000000000000..608bf3133866 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -0,0 +1,227 @@ +// Package storage provides clients for Microsoft Azure Storage Services. 
+package storage + +import ( + "bytes" + "fmt" + "net/url" + "sort" + "strings" +) + +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services + +type authentication string + +const ( + sharedKey authentication = "sharedKey" + sharedKeyForTable authentication = "sharedKeyTable" + sharedKeyLite authentication = "sharedKeyLite" + sharedKeyLiteForTable authentication = "sharedKeyLiteTable" + + // headers + headerAcceptCharset = "Accept-Charset" + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerDataServiceVersion = "DataServiceVersion" + headerMaxDataServiceVersion = "MaxDataServiceVersion" + headerContentTransferEncoding = "Content-Transfer-Encoding" +) + +func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader + return headers, nil +} + +func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { + canRes, err := c.buildCanonicalizedResource(url, auth) + if err != nil { + return "", err + } + + canString, err := buildCanonicalizedString(verb, headers, canRes, auth) + if err != nil { + return "", err + } + return c.createAuthorizationHeader(canString, auth), nil +} + +func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { + 
errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if auth == sharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func (c *Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { + contentLength := headers[headerContentLength] + if contentLength == "0" { + contentLength = "" + } + date := headers[headerDate] + if v, ok := headers[headerXmsDate]; ok { + if auth == sharedKey || auth 
== sharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch auth { + case sharedKey: + canString = strings.Join([]string{ + verb, + headers[headerContentEncoding], + headers[headerContentLanguage], + contentLength, + headers[headerContentMD5], + headers[headerContentType], + date, + headers[headerIfModifiedSince], + headers[headerIfMatch], + headers[headerIfNoneMatch], + headers[headerIfUnmodifiedSince], + headers[headerRange], + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + canonicalizedResource, + }, "\n") + case sharedKeyLite: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("%s authentication is not supported yet", auth) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { + signature := c.computeHmac256(canonicalizedString) + var key string + switch auth { + case sharedKey, sharedKeyForTable: + key = 
"SharedKey" + case sharedKeyLite, sharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization_test.go new file mode 100644 index 000000000000..420868acfb71 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization_test.go @@ -0,0 +1,230 @@ +package storage + +import ( + "encoding/base64" + "net/http" + + chk "gopkg.in/check.v1" +) + +type AuthorizationSuite struct{} + +var _ = chk.Suite(&AuthorizationSuite{}) + +func (a *AuthorizationSuite) Test_addAuthorizationHeader(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) + c.Assert(err, chk.IsNil) + cli.UseSharedKeyLite = true + tableCli := cli.GetTableService() + + headers := map[string]string{ + "Accept-Charset": "UTF-8", + headerContentType: "application/json", + headerXmsDate: "Wed, 23 Sep 2015 16:40:05 GMT", + headerContentLength: "0", + headerXmsVersion: "2015-02-21", + "Accept": "application/json;odata=nometadata", + } + url := "https://golangrocksonazure.table.core.windows.net/tquery()" + headers, err = tableCli.client.addAuthorizationHeader("", url, headers, tableCli.auth) + c.Assert(err, chk.IsNil) + + c.Assert(headers[headerAuthorization], chk.Equals, "SharedKeyLite golangrocksonazure:NusXSFXAvHqr6EQNXnZZ50CvU1sX0iP/FFDHehnixLc=") +} + +func (a *AuthorizationSuite) Test_getSharedKey(c *chk.C) { + // Shared Key Lite for Tables + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) + c.Assert(err, chk.IsNil) + + headers := map[string]string{ + "Accept-Charset": "UTF-8", + headerContentType: "application/json", + headerXmsDate: "Wed, 23 Sep 2015 16:40:05 GMT", + headerContentLength: "0", + headerXmsVersion: "2015-02-21", + "Accept": "application/json;odata=nometadata", + } + url := 
"https://golangrocksonazure.table.core.windows.net/tquery()" + + key, err := cli.getSharedKey("", url, headers, sharedKeyLiteForTable) + c.Assert(err, chk.IsNil) + c.Assert(key, chk.Equals, "SharedKeyLite golangrocksonazure:NusXSFXAvHqr6EQNXnZZ50CvU1sX0iP/FFDHehnixLc=") +} + +func (a *AuthorizationSuite) Test_buildCanonicalizedResource(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) + c.Assert(err, chk.IsNil) + + type test struct { + url string + auth authentication + expected string + } + tests := []test{ + // Shared Key + {"https://golangrocksonazure.blob.core.windows.net/path?a=b&c=d", sharedKey, "/golangrocksonazure/path\na:b\nc:d"}, + {"https://golangrocksonazure.blob.core.windows.net/?comp=list", sharedKey, "/golangrocksonazure/\ncomp:list"}, + {"https://golangrocksonazure.blob.core.windows.net/cnt/blob", sharedKey, "/golangrocksonazure/cnt/blob"}, + {"https://golangrocksonazure.blob.core.windows.net/cnt/bl ob", sharedKey, "/golangrocksonazure/cnt/bl%20ob"}, + {"https://golangrocksonazure.blob.core.windows.net/c nt/blob", sharedKey, "/golangrocksonazure/c%20nt/blob"}, + {"https://golangrocksonazure.blob.core.windows.net/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A blob", sharedKey, "/golangrocksonazure/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A%20blob"}, + {"https://golangrocksonazure.blob.core.windows.net/cnt/blob-._~:,@;+=blob", sharedKey, "/golangrocksonazure/cnt/blob-._~:,@;+=blob"}, + {"https://golangrocksonazure.blob.core.windows.net/c nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob", sharedKey, "/golangrocksonazure/c%20nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob"}, + // Shared Key Lite for Table + {"https://golangrocksonazure.table.core.windows.net/mytable", sharedKeyLiteForTable, "/golangrocksonazure/mytable"}, + {"https://golangrocksonazure.table.core.windows.net/mytable?comp=acl", sharedKeyLiteForTable, "/golangrocksonazure/mytable?comp=acl"}, + 
{"https://golangrocksonazure.table.core.windows.net/mytable?comp=acl&timeout=10", sharedKeyForTable, "/golangrocksonazure/mytable?comp=acl"}, + {"https://golangrocksonazure.table.core.windows.net/mytable(PartitionKey='pkey',RowKey='rowkey%3D')", sharedKeyForTable, "/golangrocksonazure/mytable(PartitionKey='pkey',RowKey='rowkey%3D')"}, + } + + for _, t := range tests { + out, err := cli.buildCanonicalizedResource(t.url, t.auth) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, t.expected) + } +} + +func (a *AuthorizationSuite) Test_buildCanonicalizedString(c *chk.C) { + var tests = []struct { + verb string + headers map[string]string + canonicalizedResource string + auth authentication + out string + }{ + { + // Shared Key + verb: http.MethodGet, + headers: map[string]string{ + headerXmsDate: "Sun, 11 Oct 2009 21:49:13 GMT", + headerXmsVersion: "2009-09-19", + }, + canonicalizedResource: "/myaccount/ mycontainer\ncomp:metadata\nrestype:container\ntimeout:20", + auth: sharedKey, + out: "GET\n\n\n\n\n\n\n\n\n\n\n\nx-ms-date:Sun, 11 Oct 2009 21:49:13 GMT\nx-ms-version:2009-09-19\n/myaccount/ mycontainer\ncomp:metadata\nrestype:container\ntimeout:20", + }, + { + // Shared Key for Tables + verb: http.MethodPut, + headers: map[string]string{ + headerContentType: "text/plain; charset=UTF-8", + headerDate: "Sun, 11 Oct 2009 19:52:39 GMT", + }, + canonicalizedResource: "/testaccount1/Tables", + auth: sharedKeyForTable, + out: "PUT\n\ntext/plain; charset=UTF-8\nSun, 11 Oct 2009 19:52:39 GMT\n/testaccount1/Tables", + }, + { + // Shared Key Lite + verb: http.MethodPut, + headers: map[string]string{ + headerContentType: "text/plain; charset=UTF-8", + headerXmsDate: "Sun, 20 Sep 2009 20:36:40 GMT", + "x-ms-meta-m1": "v1", + "x-ms-meta-m2": "v2", + }, + canonicalizedResource: "/testaccount1/mycontainer/hello.txt", + auth: sharedKeyLite, + out: "PUT\n\ntext/plain; charset=UTF-8\n\nx-ms-date:Sun, 20 Sep 2009 20:36:40 
GMT\nx-ms-meta-m1:v1\nx-ms-meta-m2:v2\n/testaccount1/mycontainer/hello.txt", + }, + { + // Shared Key Lite for Tables + verb: "", + headers: map[string]string{ + headerDate: "Sun, 11 Oct 2009 19:52:39 GMT", + }, + canonicalizedResource: "/testaccount1/Tables", + auth: sharedKeyLiteForTable, + out: "Sun, 11 Oct 2009 19:52:39 GMT\n/testaccount1/Tables", + }, + } + + for _, t := range tests { + canonicalizedString, err := buildCanonicalizedString(t.verb, t.headers, t.canonicalizedResource, t.auth) + c.Assert(err, chk.IsNil) + c.Assert(canonicalizedString, chk.Equals, t.out) + } +} + +func (a *AuthorizationSuite) Test_buildCanonicalizedHeader(c *chk.C) { + type test struct { + headers map[string]string + expected string + } + tests := []test{ + {map[string]string{}, + ""}, + {map[string]string{ + "x-ms-lol": "rofl"}, + "x-ms-lol:rofl"}, + {map[string]string{ + "lol:": "rofl"}, + ""}, + {map[string]string{ + "lol:": "rofl", + "x-ms-lol": "rofl"}, + "x-ms-lol:rofl"}, + {map[string]string{ + "x-ms-version": "9999-99-99", + "x-ms-blob-type": "BlockBlob"}, + "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + + for _, i := range tests { + c.Assert(buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) + } +} + +func (a *AuthorizationSuite) Test_createAuthorizationHeader(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, base64.StdEncoding.EncodeToString([]byte("bar"))) + c.Assert(err, chk.IsNil) + + canonicalizedString := `foobarzoo` + + c.Assert(cli.createAuthorizationHeader(canonicalizedString, sharedKey), + chk.Equals, `SharedKey golangrocksonazure:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`) + c.Assert(cli.createAuthorizationHeader(canonicalizedString, sharedKeyLite), + chk.Equals, `SharedKeyLite golangrocksonazure:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`) +} + +func (a *AuthorizationSuite) Test_allSharedKeys(c *chk.C) { + cli := getBasicClient(c) + rec := cli.appendRecorder(c) + defer rec.Stop() + + blobCli := cli.GetBlobService() + tableCli 
:= cli.GetTableService() + + cnt1 := blobCli.GetContainerReference(containerName(c, "1")) + cnt2 := blobCli.GetContainerReference(containerName(c, "2")) + + // Shared Key + c.Assert(blobCli.auth, chk.Equals, sharedKey) + c.Assert(cnt1.Create(nil), chk.IsNil) + c.Assert(cnt1.Delete(nil), chk.IsNil) + + // Shared Key for Tables + c.Assert(tableCli.auth, chk.Equals, sharedKeyForTable) + table1 := tableCli.GetTableReference(tableName(c, "1")) + c.Assert(table1.tsc.auth, chk.Equals, sharedKeyForTable) + c.Assert(table1.Create(30, EmptyPayload, nil), chk.IsNil) + c.Assert(table1.Delete(30, nil), chk.IsNil) + + // Change to Lite + cli.UseSharedKeyLite = true + blobCli = cli.GetBlobService() + tableCli = cli.GetTableService() + + // Shared Key Lite + c.Assert(blobCli.auth, chk.Equals, sharedKeyLite) + c.Assert(cnt2.Create(nil), chk.IsNil) + c.Assert(cnt2.Delete(nil), chk.IsNil) + + // Shared Key Lite for Tables + tableCli = cli.GetTableService() + c.Assert(tableCli.auth, chk.Equals, sharedKeyLiteForTable) + table2 := tableCli.GetTableReference(tableName(c, "2")) + c.Assert(table2.tsc.auth, chk.Equals, sharedKeyLiteForTable) + c.Assert(table2.Create(30, EmptyPayload, nil), chk.IsNil) + c.Assert(table2.Delete(30, nil), chk.IsNil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 3dbaca52ade2..dd9eb386cb5a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -1,12 +1,10 @@ package storage import ( - "bytes" "encoding/xml" "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -14,51 +12,28 @@ import ( "time" ) -// BlobStorageClient contains operations for Microsoft Azure Blob Storage -// Service. -type BlobStorageClient struct { - client Client -} - -// A Container is an entry in ContainerListResponse. 
-type Container struct { - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` - // TODO (ahmetalpbalkan) Metadata -} - -// ContainerProperties contains various properties of a container returned from -// various endpoints like ListContainers. -type ContainerProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - LeaseStatus string `xml:"LeaseStatus"` - LeaseState string `xml:"LeaseState"` - LeaseDuration string `xml:"LeaseDuration"` - // TODO (ahmetalpbalkan) remaining fields -} - -// ContainerListResponse contains the response fields from -// ListContainers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ContainerListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Containers []Container `xml:"Containers>Container"` -} - // A Blob is an entry in BlobListResponse. type Blob struct { + Container *Container Name string `xml:"Name"` + Snapshot time.Time `xml:"Snapshot"` Properties BlobProperties `xml:"Properties"` Metadata BlobMetadata `xml:"Metadata"` } +// PutBlobOptions includes the options any put blob operation +// (page, block, append) +type PutBlobOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + // BlobMetadata is a set of custom name/value pairs. 
// // See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx @@ -106,128 +81,28 @@ func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) erro // BlobProperties contains various properties of a blob // returned in various endpoints like ListBlobs or GetBlobProperties. type BlobProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - ContentMD5 string `xml:"Content-MD5"` - ContentLength int64 `xml:"Content-Length"` - ContentType string `xml:"Content-Type"` - ContentEncoding string `xml:"Content-Encoding"` - CacheControl string `xml:"Cache-Control"` - ContentLanguage string `xml:"Cache-Language"` - BlobType BlobType `xml:"x-ms-blob-blob-type"` - SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` - CopyID string `xml:"CopyId"` - CopyStatus string `xml:"CopyStatus"` - CopySource string `xml:"CopySource"` - CopyProgress string `xml:"CopyProgress"` - CopyCompletionTime string `xml:"CopyCompletionTime"` - CopyStatusDescription string `xml:"CopyStatusDescription"` - LeaseStatus string `xml:"LeaseStatus"` -} - -// BlobHeaders contains various properties of a blob and is an entry -// in SetBlobProperties -type BlobHeaders struct { - ContentMD5 string `header:"x-ms-blob-content-md5"` - ContentLanguage string `header:"x-ms-blob-content-language"` - ContentEncoding string `header:"x-ms-blob-content-encoding"` - ContentType string `header:"x-ms-blob-content-type"` - CacheControl string `header:"x-ms-blob-cache-control"` -} - -// BlobListResponse contains the response fields from ListBlobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type BlobListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Blobs []Blob `xml:"Blobs>Blob"` - - // BlobPrefix is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - // The list here can be thought of as "folders" that may contain - // other folders or blobs. - BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` - - // Delimiter is used to traverse blobs as if it were a file system. - // It is returned if ListBlobsParameters.Delimiter is specified. - Delimiter string `xml:"Delimiter"` -} - -// ListContainersParameters defines the set of customizable parameters to make a -// List Containers call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -type ListContainersParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListContainersParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -// ListBlobsParameters defines the set of customizable -// parameters to make a List Blobs call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -type ListBlobsParameters struct { - Prefix string - Delimiter string - Marker string - Include string - MaxResults uint - Timeout uint -} - -func (p ListBlobsParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Delimiter != "" { - out.Set("delimiter", p.Delimiter) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out + LastModified TimeRFC1123 `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` + ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` + CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` + ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` + ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + ServerEncrypted bool `xml:"ServerEncrypted"` + IncrementalCopy bool `xml:"IncrementalCopy"` } // BlobType defines the type of the Azure Blob. 
@@ -240,257 +115,18 @@ const ( BlobTypeAppend BlobType = "AppendBlob" ) -// PageWriteType defines the type updates that are going to be -// done on the page blob. -type PageWriteType string - -// Types of operations on page blobs -const ( - PageWriteTypeUpdate PageWriteType = "update" - PageWriteTypeClear PageWriteType = "clear" -) - -const ( - blobCopyStatusPending = "pending" - blobCopyStatusSuccess = "success" - blobCopyStatusAborted = "aborted" - blobCopyStatusFailed = "failed" -) - -// lease constants. -const ( - leaseHeaderPrefix = "x-ms-lease-" - leaseID = "x-ms-lease-id" - leaseAction = "x-ms-lease-action" - leaseBreakPeriod = "x-ms-lease-break-period" - leaseDuration = "x-ms-lease-duration" - leaseProposedID = "x-ms-proposed-lease-id" - leaseTime = "x-ms-lease-time" - - acquireLease = "acquire" - renewLease = "renew" - changeLease = "change" - releaseLease = "release" - breakLease = "break" -) - -// BlockListType is used to filter out types of blocks in a Get Blocks List call -// for a block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all -// block types. -type BlockListType string - -// Filters for listing blocks in block blobs -const ( - BlockListTypeAll BlockListType = "all" - BlockListTypeCommitted BlockListType = "committed" - BlockListTypeUncommitted BlockListType = "uncommitted" -) - -// ContainerAccessType defines the access level to the container from a public -// request. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- -// blob-public-access" header. 
-type ContainerAccessType string - -// Access options for containers -const ( - ContainerAccessTypePrivate ContainerAccessType = "" - ContainerAccessTypeBlob ContainerAccessType = "blob" - ContainerAccessTypeContainer ContainerAccessType = "container" -) - -// ContainerAccessOptions are used when setting ACLs of containers (after creation) -type ContainerAccessOptions struct { - ContainerAccess ContainerAccessType - Timeout int - LeaseID string -} - -// AccessPolicyDetails are used for SETTING policies -type AccessPolicyDetails struct { - ID string - StartTime time.Time - ExpiryTime time.Time - CanRead bool - CanWrite bool - CanDelete bool -} - -// ContainerPermissions is used when setting permissions and Access Policies for containers. -type ContainerPermissions struct { - AccessOptions ContainerAccessOptions - AccessPolicy AccessPolicyDetails -} - -// AccessPolicyDetailsXML has specifics about an access policy -// annotated with XML details. -type AccessPolicyDetailsXML struct { - StartTime time.Time `xml:"Start"` - ExpiryTime time.Time `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// SignedIdentifier is a wrapper for a specific policy -type SignedIdentifier struct { - ID string `xml:"Id"` - AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` -} - -// SignedIdentifiers part of the response from GetPermissions call. -type SignedIdentifiers struct { - SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` -} - -// AccessPolicy is the response type from the GetPermissions call. -type AccessPolicy struct { - SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` -} - -// ContainerAccessResponse is returned for the GetContainerPermissions function. -// This contains both the permission and access policy for the container. 
-type ContainerAccessResponse struct { - ContainerAccess ContainerAccessType - AccessPolicy SignedIdentifiers -} - -// ContainerAccessHeader references header used when setting/getting container ACL -const ( - ContainerAccessHeader string = "x-ms-blob-public-access" -) - -// Maximum sizes (per REST API) for various concepts -const ( - MaxBlobBlockSize = 4 * 1024 * 1024 - MaxBlobPageSize = 4 * 1024 * 1024 -) - -// BlockStatus defines states a block for a block blob can -// be in. -type BlockStatus string - -// List of statuses that can be used to refer to a block in a block list -const ( - BlockStatusUncommitted BlockStatus = "Uncommitted" - BlockStatusCommitted BlockStatus = "Committed" - BlockStatusLatest BlockStatus = "Latest" -) - -// Block is used to create Block entities for Put Block List -// call. -type Block struct { - ID string - Status BlockStatus -} - -// BlockListResponse contains the response fields from Get Block List call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -type BlockListResponse struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` - UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` -} - -// BlockResponse contains the block information returned -// in the GetBlockListCall. -type BlockResponse struct { - Name string `xml:"Name"` - Size int64 `xml:"Size"` -} - -// GetPageRangesResponse contains the reponse fields from -// Get Page Ranges call. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type GetPageRangesResponse struct { - XMLName xml.Name `xml:"PageList"` - PageList []PageRange `xml:"PageRange"` -} - -// PageRange contains information about a page of a page blob from -// Get Pages Range call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -type PageRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -var ( - errBlobCopyAborted = errors.New("storage: blob copy is aborted") - errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") -) - -// ListContainers returns the list of containers in a storage account along with -// pagination token and other response details. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, "", q) - headers := b.client.getStandardHeaders() - - var out ContainerListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// CreateContainer creates a blob container within the storage account -// with given name and access level. Returns error if container already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx -func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { - resp, err := b.createContainer(name, access) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CreateContainerIfNotExists creates a blob container if it does not exist. Returns -// true if container is newly created or false if container already exists. 
-func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { - resp, err := b.createContainer(name, access) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { - verb := "PUT" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - if access != "" { - headers[ContainerAccessHeader] = string(access) - } - return b.client.exec(verb, uri, headers, nil) +func (b *Blob) buildPath() string { + return b.Container.buildPath() + "/" + b.Name } -// ContainerExists returns true if a container with given name exists -// on the storage account, otherwise returns false. -func (b BlobStorageClient) ContainerExists(name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - headers := b.client.getStandardHeaders() - - resp, err := b.client.exec(verb, uri, headers, nil) +// Exists returns true if a blob with given name exists on the specified +// container of the storage account. 
+func (b *Blob) Exists() (bool, error) { + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) + headers := b.Container.bsc.client.getStandardHeaders() + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusOK, nil } @@ -498,194 +134,57 @@ func (b BlobStorageClient) ContainerExists(name string) (bool, error) { return false, err } -// SetContainerPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx -func (b BlobStorageClient) SetContainerPermissions(container string, containerPermissions ContainerPermissions) (err error) { - params := url.Values{ - "restype": {"container"}, - "comp": {"acl"}, - } - - if containerPermissions.AccessOptions.Timeout > 0 { - params.Add("timeout", strconv.Itoa(containerPermissions.AccessOptions.Timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - if containerPermissions.AccessOptions.ContainerAccess != "" { - headers[ContainerAccessHeader] = string(containerPermissions.AccessOptions.ContainerAccess) - } - - if containerPermissions.AccessOptions.LeaseID != "" { - headers[leaseID] = containerPermissions.AccessOptions.LeaseID - } - - // generate the XML for the SharedAccessSignature if required. 
- accessPolicyXML, err := generateAccessPolicy(containerPermissions.AccessPolicy) - if err != nil { - return err - } - - var resp *storageResponse - if accessPolicyXML != "" { - headers["Content-Length"] = strconv.Itoa(len(accessPolicyXML)) - resp, err = b.client.exec("PUT", uri, headers, strings.NewReader(accessPolicyXML)) - } else { - resp, err = b.client.exec("PUT", uri, headers, nil) - } - - if err != nil { - return err - } - - if resp != nil { - defer func() { - err = resp.body.Close() - }() - - if resp.statusCode != http.StatusOK { - return errors.New("Unable to set permissions") - } +// GetURL gets the canonical URL to the blob with the specified name in the +// specified container. If name is not specified, the canonical URL for the entire +// container is obtained. +// This method does not create a publicly accessible URL if the blob or container +// is private and this method does not check if the blob exists. +func (b *Blob) GetURL() string { + container := b.Container.Name + if container == "" { + container = "$root" } - return nil + return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) } -// GetContainerPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx -// If timeout is 0 then it will not be passed to Azure -// leaseID will only be passed to Azure if populated -// Returns permissionResponse which is combined permissions and AccessPolicy -func (b BlobStorageClient) GetContainerPermissions(container string, timeout int, leaseID string) (permissionResponse *ContainerAccessResponse, err error) { - params := url.Values{"restype": {"container"}, - "comp": {"acl"}} - - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) - headers := b.client.getStandardHeaders() - - if leaseID != "" { - headers[leaseID] = leaseID - } - - resp, err := b.client.exec("GET", 
uri, headers, nil) - if err != nil { - return nil, err - } - - // containerAccess. Blob, Container, empty - containerAccess := resp.headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) - - defer func() { - err = resp.body.Close() - }() - - var out AccessPolicy - err = xmlUnmarshal(resp.body, &out.SignedIdentifiersList) - if err != nil { - return nil, err - } - - permissionResponse = &ContainerAccessResponse{} - permissionResponse.AccessPolicy = out.SignedIdentifiersList - permissionResponse.ContainerAccess = ContainerAccessType(containerAccess) - - return permissionResponse, nil +// GetBlobRangeOptions includes the options for a get blob range operation +type GetBlobRangeOptions struct { + Range *BlobRange + GetRangeContentMD5 bool + *GetBlobOptions } -// DeleteContainer deletes the container with given name on the storage -// account. If the container does not exist returns error. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainer(name string) error { - resp, err := b.deleteContainer(name) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +// GetBlobOptions includes the options for a get blob operation +type GetBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// DeleteContainerIfExists deletes the container with given name on the storage -// account if it exists. Returns true if container is deleted with this call, or -// false if the container did not exist at the time of the Delete Container -// operation. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx -func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { - resp, err := b.deleteContainer(name) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } - } - return false, err +// BlobRange represents the bytes range to be get +type BlobRange struct { + Start uint64 + End uint64 } -func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) - - headers := b.client.getStandardHeaders() - return b.client.exec(verb, uri, headers, nil) +func (br BlobRange) String() string { + return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) } -// ListBlobs returns an object that contains list of blobs in the container, -// pagination token and other information in the response of List Blobs call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx -func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{ - "restype": {"container"}, - "comp": {"list"}}) - uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) - headers := b.client.getStandardHeaders() - - var out BlobListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err +// Get returns a stream to read the blob. Caller must call both Read and Close() +// to correctly close the underlying connection. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob +func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { + rangeOptions := GetBlobRangeOptions{ + GetBlobOptions: options, } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// BlobExists returns true if a blob with given name exists on the specified -// container of the storage account. -func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } - } - return false, err -} - -// GetBlobURL gets the canonical URL to the blob with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the blob or container is private and this method does not check if the blob -// exists. -func (b BlobStorageClient) GetBlobURL(container, name string) string { - if container == "" { - container = "$root" - } - return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) -} - -// GetBlob returns a stream to read the blob. Caller must call Close() the -// reader to close on the underlying connection. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, "", nil) + resp, err := b.getRange(&rangeOptions) if err != nil { return nil, err } @@ -693,15 +192,19 @@ func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return nil, err } + if err := b.writePropoerties(resp.headers); err != nil { + return resp.body, err + } return resp.body, nil } -// GetBlobRange reads the specified range of a blob to a stream. The bytesRange +// GetRange reads the specified range of a blob to a stream. The bytesRange // string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx -func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { - resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) +// Caller must call both Read and Close()// to correctly close the underlying +// connection. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob +func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { + resp, err := b.getRange(options) if err != nil { return nil, err } @@ -709,65 +212,65 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extr if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { return nil, err } + if err := b.writePropoerties(resp.headers); err != nil { + return resp.body, err + } return resp.body, nil } -func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "GET" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - if bytesRange != "" { - headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) - } +func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v + if options != nil { + if options.Range != nil { + headers["Range"] = options.Range.String() + headers["x-ms-range-get-content-md5"] = fmt.Sprintf("%v", options.GetRangeContentMD5) + } + if options.GetBlobOptions != nil { + headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + } } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - resp, err := b.client.exec(verb, uri, headers, nil) + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) if err != nil { return nil, err } return resp, err } -// leasePut is common PUT code for the various aquire/release/break etc functions. 
-func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { - params := url.Values{"comp": {"lease"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { - return nil, err - } - - return resp.headers, nil +// SnapshotOptions includes the options for a snapshot blob operation +type SnapshotOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx -func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { - headers := b.client.getStandardHeaders() +// CreateSnapshot creates a snapshot for a blob +// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx +func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { params := url.Values{"comp": {"snapshot"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - for k, v := range extraHeaders { - headers[k] = v - } 
- - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil || resp == nil { return nil, err } + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { return nil, err @@ -779,212 +282,178 @@ func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout i if err != nil { return nil, err } - return &snapshotTimestamp, nil } return nil, errors.New("Snapshot not created") } -// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// returns leaseID acquired -func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = acquireLease - - if leaseTimeInSeconds > 0 { - headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) - } - - if proposedLeaseID != "" { - headers[leaseProposedID] = proposedLeaseID - } - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) - if err != nil { - return "", err - } - - returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) - - if returnedLeaseID != "" { - return returnedLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - return b.breakLeaseCommon(container, name, headers) -} - -// BreakLeaseWithBreakPeriod breaks the lease 
for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// breakPeriodInSeconds is used to determine how long until new lease can be created. -// Returns the timeout remaining in the lease in seconds -func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = breakLease - headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) - return b.breakLeaseCommon(container, name, headers) +// GetBlobPropertiesOptions includes the options for a get blob properties operation +type GetBlobPropertiesOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// breakLeaseCommon is common code for both version of BreakLease (with and without break period) -func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { +// GetProperties provides various information about the specified blob. 
+// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) - if err != nil { - return 0, err + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) - if breakTimeoutStr != "" { - breakTimeout, err = strconv.Atoi(breakTimeoutStr) - if err != nil { - return 0, err - } - } - - return breakTimeout, nil -} - -// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -// Returns the new LeaseID acquired -func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { - headers := b.client.getStandardHeaders() - headers[leaseAction] = changeLease - headers[leaseID] = currentLeaseID - headers[leaseProposedID] = proposedLeaseID - - respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { - return "", err - } - - newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) - if newLeaseID != "" { - return newLeaseID, nil - } - - return "", errors.New("LeaseID not returned") -} - -// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = releaseLease - headers[leaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, 
http.StatusOK) + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } + defer readAndCloseBody(resp.body) - return nil -} - -// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx -func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { - headers := b.client.getStandardHeaders() - headers[leaseAction] = renewLease - headers[leaseID] = currentLeaseID - - _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) - if err != nil { + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { return err } - - return nil + return b.writePropoerties(resp.headers) } -// GetBlobProperties provides various information about the specified -// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx -func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { - verb := "HEAD" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - resp, err := b.client.exec(verb, uri, headers, nil) - if err != nil { - return nil, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } +func (b *Blob) writePropoerties(h http.Header) error { + var err error var contentLength int64 - contentLengthStr := resp.headers.Get("Content-Length") + contentLengthStr := h.Get("Content-Length") if contentLengthStr != "" { contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) if err != nil { - return nil, err + return err } } var sequenceNum int64 - sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + sequenceNumStr := h.Get("x-ms-blob-sequence-number") if sequenceNumStr != "" { sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) if err != nil { - return nil, err + return 
err } } - return &BlobProperties{ - LastModified: resp.headers.Get("Last-Modified"), - Etag: resp.headers.Get("Etag"), - ContentMD5: resp.headers.Get("Content-MD5"), + lastModified, err := getTimeFromHeaders(h, "Last-Modified") + if err != nil { + return err + } + + copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") + if err != nil { + return err + } + + b.Properties = BlobProperties{ + LastModified: TimeRFC1123(*lastModified), + Etag: h.Get("Etag"), + ContentMD5: h.Get("Content-MD5"), ContentLength: contentLength, - ContentEncoding: resp.headers.Get("Content-Encoding"), - ContentType: resp.headers.Get("Content-Type"), - CacheControl: resp.headers.Get("Cache-Control"), - ContentLanguage: resp.headers.Get("Content-Language"), + ContentEncoding: h.Get("Content-Encoding"), + ContentType: h.Get("Content-Type"), + ContentDisposition: h.Get("Content-Disposition"), + CacheControl: h.Get("Cache-Control"), + ContentLanguage: h.Get("Content-Language"), SequenceNumber: sequenceNum, - CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), - CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), - CopyID: resp.headers.Get("x-ms-copy-id"), - CopyProgress: resp.headers.Get("x-ms-copy-progress"), - CopySource: resp.headers.Get("x-ms-copy-source"), - CopyStatus: resp.headers.Get("x-ms-copy-status"), - BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), - LeaseStatus: resp.headers.Get("x-ms-lease-status"), - }, nil + CopyCompletionTime: TimeRFC1123(*copyCompletionTime), + CopyStatusDescription: h.Get("x-ms-copy-status-description"), + CopyID: h.Get("x-ms-copy-id"), + CopyProgress: h.Get("x-ms-copy-progress"), + CopySource: h.Get("x-ms-copy-source"), + CopyStatus: h.Get("x-ms-copy-status"), + BlobType: BlobType(h.Get("x-ms-blob-type")), + LeaseStatus: h.Get("x-ms-lease-status"), + LeaseState: h.Get("x-ms-lease-state"), + } + b.writeMetadata(h) + return nil +} + +// SetBlobPropertiesOptions contains various properties of a 
blob and is an entry +// in SetProperties +type SetBlobPropertiesOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + SequenceNumberAction *SequenceNumberAction + RequestID string `header:"x-ms-client-request-id"` } -// SetBlobProperties replaces the BlobHeaders for the specified blob. +// SequenceNumberAction defines how the blob's sequence number should be modified +type SequenceNumberAction string + +// Options for sequence number action +const ( + SequenceNumberActionMax SequenceNumberAction = "max" + SequenceNumberActionUpdate SequenceNumberAction = "update" + SequenceNumberActionIncrement SequenceNumberAction = "increment" +) + +// SetProperties replaces the BlobHeaders for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobProperties. HTTP header names // are case-insensitive so case munging should not matter to other // applications either. 
// -// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx -func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties +func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { params := url.Values{"comp": {"properties"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - extraHeaders := headersFromStruct(blobHeaders) - - for k, v := range extraHeaders { - headers[k] = v + headers := b.Container.bsc.client.getStandardHeaders() + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + if b.Properties.BlobType == BlobTypePage { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("byte %v", b.Properties.ContentLength)) + if options != nil || options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) + if *options.SequenceNumberAction != SequenceNumberActionIncrement { + headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) + } + } } - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusOK}) } -// SetBlobMetadata replaces the metadata for the specified blob. 
+// SetBlobMetadataOptions includes the options for a set blob metadata operation +type SetBlobMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetBlobMetadata. HTTP header names @@ -992,50 +461,71 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders // applications either. // // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { +func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v - } + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) - for k, v := range extraHeaders { - headers[k] = v + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - resp, err := b.client.exec("PUT", uri, headers, nil) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, 
[]int{http.StatusOK}) } -// GetBlobMetadata returns all user-defined metadata for the specified blob. +// GetBlobMetadataOptions includes the options for a get blob metadata operation +type GetBlobMetadataOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetMetadata returns all user-defined metadata for the specified blob. // // All metadata keys will be returned in lower case. (HTTP header // names are case-insensitive.) // // See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { +func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { params := url.Values{"comp": {"metadata"}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) - resp, err := b.client.exec("GET", uri, headers, nil) + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) if err != nil { - return nil, err + return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err + return err } + b.writeMetadata(resp.headers) + return nil +} + +func (b *Blob) writeMetadata(h http.Header) { metadata := make(map[string]string) - for k, v := 
range resp.headers { + for k, v := range h { // Can't trust CanonicalHeaderKey() to munge case // reliably. "_" is allowed in identifiers: // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx @@ -1044,377 +534,53 @@ func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]s // ...but "_" is considered invalid by // CanonicalMIMEHeaderKey in // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". k = strings.ToLower(k) if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { continue } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header + // metadata["lol"] = content of the last X-Ms-Meta-Lol header k = k[len(userDefinedMetadataHeaderPrefix):] metadata[k] = v[len(v)-1] } - return metadata, nil -} - -// CreateBlockBlob initializes an empty block blob with no blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlob(container, name string) error { - return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) -} - -// CreateBlockBlobFromReader initializes a block blob using data from -// reader. Size must be the number of bytes read from reader. To -// create an empty blob, use size==0 and reader==nil. -// -// The API rejects requests with size > 64 MiB (but this limit is not -// checked by the SDK). To write a larger blob, use CreateBlockBlob, -// PutBlock, and PutBlockList. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%d", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, blob) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlock saves the given data chunk to the specified block blob with -// given ID. -// -// The API rejects chunks larger than 4 MiB (but this limit is not -// checked by the SDK). -// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { - return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) -} - -// PutBlockWithLength saves the given data stream of exactly specified size to -// the block blob with given ID. It is an alternative to PutBlocks where data -// comes as stream but the length is known in advance. -// -// The API rejects requests with size > 4 MiB (but this limit is not -// checked by the SDK). 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx -func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, blob) - if err != nil { - return err - } - - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutBlockList saves list of blocks to the specified block blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx -func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { - blockListXML := prepareBlockListRequest(blocks) - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) - headers := b.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) - - resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) - if err != nil { - return err - } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetBlockList retrieves list of blocks in the specified block blob. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx -func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { - params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - - var out BlockListResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// PutPageBlob initializes an empty page blob with specified name and maximum -// size in bytes (size must be aligned to a 512-byte boundary). A page blob must -// be created using this method before writing pages. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// PutPage writes a range of pages to a page blob or clears the given range. -// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned -// with 512-byte boundaries and chunk must be of size multiplies by 512. 
-// -// See https://msdn.microsoft.com/en-us/library/ee691975.aspx -func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypePage) - headers["x-ms-page-write"] = string(writeType) - headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) - for k, v := range extraHeaders { - headers[k] = v - } - var contentLength int64 - var data io.Reader - if writeType == PageWriteTypeClear { - contentLength = 0 - data = bytes.NewReader([]byte{}) - } else { - contentLength = int64(len(chunk)) - data = bytes.NewReader(chunk) - } - headers["Content-Length"] = fmt.Sprintf("%v", contentLength) - - resp, err := b.client.exec("PUT", uri, headers, data) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// GetPageRanges returns the list of valid page ranges for a page blob. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx -func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) - headers := b.client.getStandardHeaders() - - var out GetPageRangesResponse - resp, err := b.client.exec("GET", uri, headers, nil) - if err != nil { - return out, err - } - defer resp.body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return out, err - } - err = xmlUnmarshal(resp.body, &out) - return out, err + b.Metadata = BlobMetadata(metadata) } -// PutAppendBlob initializes an empty append blob with specified name. 
An -// append blob must be created using this method before appending blocks. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx -func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +// DeleteBlobOptions includes the options for a delete blob operation +type DeleteBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + DeleteSnapshots *bool + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` } -// AppendBlock appends a block to an append blob. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx -func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { - path := fmt.Sprintf("%s/%s", container, name) - uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) - headers := b.client.getStandardHeaders() - headers["x-ms-blob-type"] = string(BlobTypeAppend) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) - - for k, v := range extraHeaders { - headers[k] = v - } - - resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) -} - -// CopyBlob starts a blob copy operation and waits for the operation to -// complete. sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore -// this helper method works faster on smaller files. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { - copyID, err := b.StartBlobCopy(container, name, sourceBlob) - if err != nil { - return err - } - - return b.WaitForBlobCopy(container, name, copyID) -} - -// StartBlobCopy starts a blob copy operation. -// sourceBlob parameter must be a canonical URL to the blob (can be -// obtained using GetBlobURL method.) 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx -func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) { - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - - headers := b.client.getStandardHeaders() - headers["x-ms-copy-source"] = sourceBlob - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return "", err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { - return "", err - } - - copyID := resp.headers.Get("x-ms-copy-id") - if copyID == "" { - return "", errors.New("Got empty copy id header") - } - return copyID, nil -} - -// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. -// copyID is generated from StartBlobCopy function. -// currentLeaseID is required IF the destination blob has an active lease on it. -// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx -func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error { - params := url.Values{"comp": {"copy"}, "copyid": {copyID}} - if timeout > 0 { - params.Add("timeout", strconv.Itoa(timeout)) - } - - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) - headers := b.client.getStandardHeaders() - headers["x-ms-copy-action"] = "abort" - - if currentLeaseID != "" { - headers[leaseID] = currentLeaseID - } - - resp, err := b.client.exec("PUT", uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error) -func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error { - for { - props, err := 
b.GetBlobProperties(container, name) - if err != nil { - return err - } - - if props.CopyID != copyID { - return errBlobCopyIDMismatch - } - - switch props.CopyStatus { - case blobCopyStatusSuccess: - return nil - case blobCopyStatusPending: - continue - case blobCopyStatusAborted: - return errBlobCopyAborted - case blobCopyStatusFailed: - return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) - default: - return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) - } - } -} - -// DeleteBlob deletes the given blob from the specified container. +// Delete deletes the given blob from the specified container. // If the blob does not exists at the time of the Delete Blob operation, it -// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { - resp, err := b.deleteBlob(container, name, extraHeaders) +// returns error. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) Delete(options *DeleteBlobOptions) error { + resp, err := b.delete(options) if err != nil { return err } - defer resp.body.Close() + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) } -// DeleteBlobIfExists deletes the given blob from the specified container If the +// DeleteIfExists deletes the given blob from the specified container If the // blob is deleted with this call, returns true. Otherwise returns false. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx -func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { - resp, err := b.deleteBlob(container, name, extraHeaders) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { + resp, err := b.delete(options) if resp != nil { - defer resp.body.Close() + defer readAndCloseBody(resp.body) if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { return resp.statusCode == http.StatusAccepted, nil } @@ -1422,175 +588,30 @@ func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeade return false, err } -func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { - verb := "DELETE" - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() - for k, v := range extraHeaders { - headers[k] = v - } - - return b.client.exec(verb, uri, headers, nil) -} - -// helper method to construct the path to a container given its name -func pathForContainer(name string) string { - return fmt.Sprintf("/%s", name) -} - -// helper method to construct the path to a blob given its container and blob -// name -func pathForBlob(container, name string) string { - return fmt.Sprintf("/%s/%s", container, name) -} - -// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed procotols. -// If old API version is used but no signedIP is passed (ie empty string) then this should still work. -// We only populate the signedIP when it non-empty. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { - var ( - signedPermissions = permissions - blobURL = b.GetBlobURL(container, name) - ) - canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) - if err != nil { - return "", err - } - - // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. - // It must include the service name (blob, table, queue or file) for version 2015-02-21 or - // later, the storage account name, and the resource name, and must be URL-decoded. - // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - - // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). - canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) - canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) - if err != nil { - return "", err - } - - signedExpiry := expiry.UTC().Format(time.RFC3339) - signedResource := "b" - - protocols := "https,http" - if HTTPSOnly { - protocols = "https" - } - stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) - if err != nil { - return "", err - } - - sig := b.client.computeHmac256(stringToSign) - sasParams := url.Values{ - "sv": {b.client.apiVersion}, - "se": {signedExpiry}, - "sr": {signedResource}, - "sp": {signedPermissions}, - "sig": {sig}, - } - - if b.client.apiVersion >= "2015-04-05" { - sasParams.Add("spr", protocols) - if signedIPRange != "" { - sasParams.Add("sip", signedIPRange) +func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + + if 
options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.DeleteSnapshots != nil { + if *options.DeleteSnapshots { + headers["x-ms-delete-snapshots"] = "include" + } else { + headers["x-ms-delete-snapshots"] = "only" + } } } - - sasURL, err := url.Parse(blobURL) - if err != nil { - return "", err - } - sasURL.RawQuery = sasParams.Encode() - return sasURL.String(), nil -} - -// GetBlobSASURI creates an URL to the specified blob which contains the Shared -// Access Signature with specified permissions and expiration time. -// -// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx -func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { - url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false) - return url, err -} - -func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { - var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string - - if signedVersion >= "2015-02-21" { - canonicalizedResource = "/blob" + canonicalizedResource - } - - // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 - if signedVersion >= "2015-04-05" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - if signedVersion >= "2013-08-15" { - return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil - } - - return "", 
errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") -} - -func generatePermissions(accessPolicy AccessPolicyDetails) (permissions string) { - // generate the permissions string (rwd). - // still want the end user API to have bool flags. - permissions = "" - - if accessPolicy.CanRead { - permissions += "r" - } - - if accessPolicy.CanWrite { - permissions += "w" - } - - if accessPolicy.CanDelete { - permissions += "d" - } - - return permissions + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) } -// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the -// AccessPolicy struct which will get converted to XML. -func convertAccessPolicyToXMLStructs(accessPolicy AccessPolicyDetails) SignedIdentifiers { - return SignedIdentifiers{ - SignedIdentifiers: []SignedIdentifier{ - { - ID: accessPolicy.ID, - AccessPolicy: AccessPolicyDetailsXML{ - StartTime: accessPolicy.StartTime.UTC().Round(time.Second), - ExpiryTime: accessPolicy.ExpiryTime.UTC().Round(time.Second), - Permission: generatePermissions(accessPolicy), - }, - }, - }, +// helper method to construct the path to either a blob or container +func pathForResource(container, name string) string { + if name != "" { + return fmt.Sprintf("/%s/%s", container, name) } -} - -// generateAccessPolicy generates the XML access policy used as the payload for SetContainerPermissions. 
-func generateAccessPolicy(accessPolicy AccessPolicyDetails) (accessPolicyXML string, err error) { - - if accessPolicy.ID != "" { - signedIdentifiers := convertAccessPolicyToXMLStructs(accessPolicy) - body, _, err := xmlMarshal(signedIdentifiers) - if err != nil { - return "", err - } - - xmlByteArray, err := ioutil.ReadAll(body) - if err != nil { - return "", err - } - accessPolicyXML = string(xmlByteArray) - return accessPolicyXML, nil - } - - return "", nil + return fmt.Sprintf("/%s", container) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go index eb0064e9e24f..a70b518bf4fe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go @@ -2,18 +2,12 @@ package storage import ( "bytes" - "crypto/rand" - "encoding/base64" "encoding/xml" "fmt" - "io" "io/ioutil" "net/http" - "net/url" - "sort" - "sync" - "testing" - "time" + "strconv" + "strings" chk "gopkg.in/check.v1" ) @@ -22,1563 +16,509 @@ type StorageBlobSuite struct{} var _ = chk.Suite(&StorageBlobSuite{}) -const testContainerPrefix = "zzzztest-" - func getBlobClient(c *chk.C) BlobStorageClient { return getBasicClient(c).GetBlobService() } -func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { - c.Assert(pathForContainer("foo"), chk.Equals, "/foo") -} - -func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) { - c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") -} - -func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { - _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP", "", "") - c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 - - out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP", "", "") - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") - - // check format for 2015-04-05 version - out, err = 
blobSASStringToSign("2015-04-05", "CS", "SE", "SP", "127.0.0.1", "https,http") - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, "SP\n\nSE\n/blobCS\n\n127.0.0.1\nhttps,http\n2015-04-05\n\n\n\n\n") -} - -func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { - api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) - c.Assert(err, chk.IsNil) - cli := api.GetBlobService() - expiry := time.Time{} - - expectedParts := url.URL{ - Scheme: "https", - Host: "foo.blob.core.windows.net", - Path: "container/name", - RawQuery: url.Values{ - "sv": {"2013-08-15"}, - "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, - "sr": {"b"}, - "sp": {"r"}, - "se": {"0001-01-01T00:00:00Z"}, - }.Encode()} - - u, err := cli.GetBlobSASURI("container", "name", expiry, "r") - c.Assert(err, chk.IsNil) - sasParts, err := url.Parse(u) - c.Assert(err, chk.IsNil) - c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) - c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) -} - -func (s *StorageBlobSuite) TestGetBlobSASURIWithSignedIPAndProtocolValidAPIVersionPassed(c *chk.C) { - api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2015-04-05", true) - c.Assert(err, chk.IsNil) - cli := api.GetBlobService() - expiry := time.Time{} - - expectedParts := url.URL{ - Scheme: "https", - Host: "foo.blob.core.windows.net", - Path: "/container/name", - RawQuery: url.Values{ - "sv": {"2015-04-05"}, - "sig": {"VBOYJmt89UuBRXrxNzmsCMoC+8PXX2yklV71QcL1BfM="}, - "sr": {"b"}, - "sip": {"127.0.0.1"}, - "sp": {"r"}, - "se": {"0001-01-01T00:00:00Z"}, - "spr": {"https"}, - }.Encode()} - - u, err := cli.GetBlobSASURIWithSignedIPAndProtocol("container", "name", expiry, "r", "127.0.0.1", true) - c.Assert(err, chk.IsNil) - sasParts, err := url.Parse(u) - c.Assert(err, chk.IsNil) - c.Assert(sasParts.Query(), chk.DeepEquals, expectedParts.Query()) -} - -// Trying to use SignedIP and Protocol but using an older version of the API. 
-// Should ignore the signedIP/protocol and just use what the older version requires. -func (s *StorageBlobSuite) TestGetBlobSASURIWithSignedIPAndProtocolUsingOldAPIVersion(c *chk.C) { - api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) - c.Assert(err, chk.IsNil) - cli := api.GetBlobService() - expiry := time.Time{} - - expectedParts := url.URL{ - Scheme: "https", - Host: "foo.blob.core.windows.net", - Path: "/container/name", - RawQuery: url.Values{ - "sv": {"2013-08-15"}, - "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, - "sr": {"b"}, - "sp": {"r"}, - "se": {"0001-01-01T00:00:00Z"}, - }.Encode()} - - u, err := cli.GetBlobSASURIWithSignedIPAndProtocol("container", "name", expiry, "r", "", true) - c.Assert(err, chk.IsNil) - sasParts, err := url.Parse(u) - c.Assert(err, chk.IsNil) - c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) - c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) -} - -func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { +func (s *StorageBlobSuite) Test_buildPath(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() - blob := randNameWithSpecialChars(5) - body := []byte(randString(100)) - expiry := time.Now().UTC().Add(time.Hour) - permissions := "r" - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) - - sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) - c.Assert(err, chk.IsNil) - - resp, err := http.Get(sasURI) - c.Assert(err, chk.IsNil) - - blobResp, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - c.Assert(err, chk.IsNil) - - c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) - c.Assert(len(blobResp), chk.Equals, len(body)) + cnt := cli.GetContainerReference("lol") + b := cnt.GetBlobReference("rofl") + c.Assert(b.buildPath(), chk.Equals, "/lol/rofl") } -func (s *StorageBlobSuite) TestListContainersPagination(c 
*chk.C) { - cli := getBlobClient(c) - c.Assert(deleteTestContainers(cli), chk.IsNil) - - const n = 5 - const pageSize = 2 - - // Create test containers - created := []string{} - for i := 0; i < n; i++ { - name := randContainer() - c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) - created = append(created, name) - } - sort.Strings(created) - - // Defer test container deletions - defer func() { - var wg sync.WaitGroup - for _, cnt := range created { - wg.Add(1) - go func(name string) { - c.Assert(cli.DeleteContainer(name), chk.IsNil) - wg.Done() - }(cnt) - } - wg.Wait() - }() - - // Paginate results - seen := []string{} - marker := "" - for { - resp, err := cli.ListContainers(ListContainersParameters{ - Prefix: testContainerPrefix, - MaxResults: pageSize, - Marker: marker}) - c.Assert(err, chk.IsNil) - - containers := resp.Containers - if len(containers) > pageSize { - c.Fatalf("Got a bigger page. Expected: %d, got: %d", pageSize, len(containers)) - } - - for _, c := range containers { - seen = append(seen, c.Name) - } - - marker = resp.NextMarker - if marker == "" || len(containers) == 0 { - break - } - } - - c.Assert(seen, chk.DeepEquals, created) +func (s *StorageBlobSuite) Test_pathForResource(c *chk.C) { + c.Assert(pathForResource("lol", ""), chk.Equals, "/lol") + c.Assert(pathForResource("lol", "blob"), chk.Equals, "/lol/blob") } -func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - ok, err := cli.ContainerExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - - ok, err = cli.ContainerExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), 
chk.IsNil) - c.Assert(cli.DeleteContainer(cnt), chk.IsNil) -} - -func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { - cnt := randContainer() - cli := getBlobClient(c) - defer cli.DeleteContainer(cnt) - - // First create - ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) - - // Second create, should not give errors - ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) -} - -func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { - cnt := randContainer() +func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - // Nonexisting container - c.Assert(cli.DeleteContainer(cnt), chk.NotNil) + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + b := cnt.GetBlobReference(blobName(c)) + defer cnt.Delete(nil) - ok, err := cli.DeleteContainerIfExists(cnt) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + defer b.Delete(nil) - // Existing container - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - ok, err = cli.DeleteContainerIfExists(cnt) + ok, err := b.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, true) -} - -func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { - cnt := randContainer() - blob := randName(5) - cli := getBlobClient(c) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) - defer cli.DeleteBlob(cnt, blob, nil) - - ok, err := cli.BlobExists(cnt, blob+".foo") + b.Name += ".lol" + ok, err = b.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, false) - ok, err = cli.BlobExists(cnt, blob) - c.Assert(err, 
chk.IsNil) - c.Assert(ok, chk.Equals, true) } func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { - api, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) c.Assert(err, chk.IsNil) - cli := api.GetBlobService() + blobCli := cli.GetBlobService() - c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") - c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") - c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") -} + cnt := blobCli.GetContainerReference("c") + b := cnt.GetBlobReference("nested/blob") + c.Assert(b.GetURL(), chk.Equals, "https://golangrocksonazure.blob.core.windows.net/c/nested/blob") -func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { - if testing.Short() { - c.Skip("skipping blob copy in short mode, no SLA on async operation") - } + cnt.Name = "" + c.Assert(b.GetURL(), chk.Equals, "https://golangrocksonazure.blob.core.windows.net/$root/nested/blob") - cli := getBlobClient(c) - cnt := randContainer() - src := randName(5) - dst := randName(5) - body := []byte(randString(1024)) + b.Name = "blob" + c.Assert(b.GetURL(), chk.Equals, "https://golangrocksonazure.blob.core.windows.net/$root/blob") - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) - defer cli.DeleteBlob(cnt, src, nil) - - c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil) - defer cli.DeleteBlob(cnt, dst, nil) - - blobBody, err := cli.GetBlob(cnt, dst) - c.Assert(err, chk.IsNil) - - b, err := ioutil.ReadAll(blobBody) - defer blobBody.Close() - c.Assert(err, chk.IsNil) - c.Assert(b, chk.DeepEquals, body) } -func (s *StorageBlobSuite) TestStartBlobCopy(c *chk.C) { - if testing.Short() { - c.Skip("skipping blob copy in short mode, no SLA on async 
operation") - } - - cli := getBlobClient(c) - cnt := randContainer() - src := randName(5) - dst := randName(5) - body := []byte(randString(1024)) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) - defer cli.DeleteBlob(cnt, src, nil) - - // given we dont know when it will start, can we even test destination creation? - // will just test that an error wasn't thrown for now. - copyID, err := cli.StartBlobCopy(cnt, dst, cli.GetBlobURL(cnt, src)) - c.Assert(copyID, chk.NotNil) +func (s *StorageBlobSuite) TestGetBlobContainerURL(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) c.Assert(err, chk.IsNil) -} + blobCli := cli.GetBlobService() -// Tests abort of blobcopy. Given the blobcopy is usually over before we can actually trigger an abort -// it is agreed that we perform a copy then try and perform an abort. It should result in a HTTP status of 409. -// So basically we're testing negative scenario (as good as we can do for now) -func (s *StorageBlobSuite) TestAbortBlobCopy(c *chk.C) { - if testing.Short() { - c.Skip("skipping blob copy in short mode, no SLA on async operation") - } + cnt := blobCli.GetContainerReference("c") + b := cnt.GetBlobReference("") + c.Assert(b.GetURL(), chk.Equals, "https://golangrocksonazure.blob.core.windows.net/c") - cli := getBlobClient(c) - cnt := randContainer() - src := randName(5) - dst := randName(5) - body := []byte(randString(1024)) - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) - defer cli.DeleteBlob(cnt, src, nil) - - // given we dont know when it will start, can we even test destination creation? - // will just test that an error wasn't thrown for now. 
- copyID, err := cli.StartBlobCopy(cnt, dst, cli.GetBlobURL(cnt, src)) - c.Assert(copyID, chk.NotNil) - c.Assert(err, chk.IsNil) - - err = cli.WaitForBlobCopy(cnt, dst, copyID) - c.Assert(err, chk.IsNil) - - // abort abort abort, but we *know* its already completed. - err = cli.AbortBlobCopy(cnt, dst, copyID, "", 0) - - // abort should fail (over already) - c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusConflict) + cnt.Name = "" + c.Assert(b.GetURL(), chk.Equals, "https://golangrocksonazure.blob.core.windows.net/$root") } func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { - cnt := randContainer() - blob := randName(5) - cli := getBlobClient(c) - c.Assert(cli.DeleteBlob(cnt, blob, nil), chk.NotNil) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - ok, err := cli.DeleteBlobIfExists(cnt, blob, nil) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.Delete(nil), chk.NotNil) + + ok, err := b.DeleteIfExists(nil) c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, false) } func (s *StorageBlobSuite) TestDeleteBlobWithConditions(c *chk.C) { - cnt := randContainer() - blob := randName(5) - cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) - oldProps, err := cli.GetBlobProperties(cnt, blob) + c.Assert(b.CreateBlockBlob(nil), chk.IsNil) + err := b.GetProperties(nil) c.Assert(err, chk.IsNil) + etag := b.Properties.Etag - // Update metadata, so Etag changes - c.Assert(cli.SetBlobMetadata(cnt, blob, map[string]string{}, nil), chk.IsNil) - newProps, err := 
cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - - // "Delete if matches old Etag" should fail without deleting. - err = cli.DeleteBlob(cnt, blob, map[string]string{ - "If-Match": oldProps.Etag, - }) + // "Delete if matches incorrect or old Etag" should fail without deleting. + options := DeleteBlobOptions{ + IfMatch: "GolangRocksOnAzure", + } + err = b.Delete(&options) c.Assert(err, chk.FitsTypeOf, AzureStorageServiceError{}) c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusPreconditionFailed) - _, err = cli.GetBlob(cnt, blob) + ok, err := b.Exists() c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) // "Delete if matches new Etag" should succeed. - err = cli.DeleteBlob(cnt, blob, map[string]string{ - "If-Match": newProps.Etag, - }) + options.IfMatch = etag + ok, err = b.DeleteIfExists(&options) c.Assert(err, chk.IsNil) - _, err = cli.GetBlob(cnt, blob) - c.Assert(err, chk.Not(chk.IsNil)) + c.Assert(ok, chk.Equals, true) } func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { - cnt := randContainer() - blob := randName(5) - contents := randString(64) - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - // Nonexisting blob - _, err := cli.GetBlobProperties(cnt, blob) + // try to get properties on a nonexisting blob + blob1 := cnt.GetBlobReference(blobName(c, "1")) + err := blob1.GetProperties(nil) c.Assert(err, chk.NotNil) - // Put the blob - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) + // Put a blob + blob2 := cnt.GetBlobReference(blobName(c, "2")) + contents := content(64) + c.Assert(blob2.putSingleBlockBlob(contents), chk.IsNil) // Get blob properties - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) 
- - c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) - c.Assert(props.ContentType, chk.Equals, "application/octet-stream") - c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) -} - -func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - blobs := []string{} - const n = 5 - const pageSize = 2 - for i := 0; i < n; i++ { - name := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) - blobs = append(blobs, name) - } - sort.Strings(blobs) - - // Paginate - seen := []string{} - marker := "" - for { - resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ - MaxResults: pageSize, - Marker: marker}) - c.Assert(err, chk.IsNil) - - for _, v := range resp.Blobs { - seen = append(seen, v.Name) - } - - marker = resp.NextMarker - if marker == "" || len(resp.Blobs) == 0 { - break - } - } - - // Compare - c.Assert(seen, chk.DeepEquals, blobs) -} - -// listBlobsAsFiles is a helper function to list blobs as "folders" and "files". -func listBlobsAsFiles(cli BlobStorageClient, cnt string, parentDir string) (folders []string, files []string, err error) { - var blobParams ListBlobsParameters - var blobListResponse BlobListResponse - - // Top level "folders" - blobParams = ListBlobsParameters{ - Delimiter: "/", - Prefix: parentDir, - } - - blobListResponse, err = cli.ListBlobs(cnt, blobParams) - if err != nil { - return nil, nil, err - } - - // These are treated as "folders" under the parentDir. - folders = blobListResponse.BlobPrefixes - - // "Files"" are blobs which are under the parentDir. 
- files = make([]string, len(blobListResponse.Blobs)) - for i := range blobListResponse.Blobs { - files[i] = blobListResponse.Blobs[i].Name - } - - return folders, files, nil -} - -// TestListBlobsTraversal tests that we can correctly traverse -// blobs in blob storage as if it were a file system by using -// a combination of Prefix, Delimiter, and BlobPrefixes. -// -// Blob storage is flat, but we can *simulate* the file -// system with folders and files using conventions in naming. -// With the blob namedd "/usr/bin/ls", when we use delimiter '/', -// the "ls" would be a "file"; with "/", /usr" and "/usr/bin" being -// the "folders" -// -// NOTE: The use of delimiter (eg forward slash) is extremely fiddly -// and difficult to get right so some discipline in naming and rules -// when using the API is required to get everything to work as expected. -// -// Assuming our delimiter is a forward slash, the rules are: -// -// - Do use a leading forward slash in blob names to make things -// consistent and simpler (see further). -// Note that doing so will show "" as the only top-level -// folder in the container in Azure portal, which may look strange. -// -// - The "folder names" are returned *with trailing forward slash* as per MSDN. -// -// - The "folder names" will be "absolue paths", e.g. listing things under "/usr/" -// will return folder names "/usr/bin/". -// -// - The "file names" are returned as full blob names, e.g. when listing -// things under "/usr/bin/", the file names will be "/usr/bin/ls" and -// "/usr/bin/cat". -// -// - Everything is returned with case-sensitive order as expected in real file system -// as per MSDN. -// -// - To list things under a "folder" always use trailing forward slash. -// -// Example: to list top level folders we use root folder named "" with -// trailing forward slash, so we use "/". -// -// Example: to list folders under "/usr", we again append forward slash and -// so we use "/usr/". 
-// -// Because we use leading forward slash we don't need to have different -// treatment of "get top-level folders" and "get non-top-level folders" -// scenarios. -func (s *StorageBlobSuite) TestListBlobsTraversal(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.DeleteContainer(cnt) - - // Note use of leading forward slash as per naming rules. - blobsToCreate := []string{ - "/usr/bin/ls", - "/usr/bin/cat", - "/usr/lib64/libc.so", - "/etc/hosts", - "/etc/init.d/iptables", - } - - // Create the above blobs - for _, blobName := range blobsToCreate { - err := cli.CreateBlockBlob(cnt, blobName) - c.Assert(err, chk.IsNil) - } - - var folders []string - var files []string - var err error - - // Top level folders and files. - folders, files, err = listBlobsAsFiles(cli, cnt, "/") - c.Assert(err, chk.IsNil) - c.Assert(folders, chk.DeepEquals, []string{"/etc/", "/usr/"}) - c.Assert(files, chk.DeepEquals, []string{}) - - // Things under /etc/. Note use of trailing forward slash here as per rules. 
- folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/") - c.Assert(err, chk.IsNil) - c.Assert(folders, chk.DeepEquals, []string{"/etc/init.d/"}) - c.Assert(files, chk.DeepEquals, []string{"/etc/hosts"}) - - // Things under /etc/init.d/ - folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/init.d/") - c.Assert(err, chk.IsNil) - c.Assert(folders, chk.DeepEquals, []string(nil)) - c.Assert(files, chk.DeepEquals, []string{"/etc/init.d/iptables"}) - - // Things under /usr/ - folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/") - c.Assert(err, chk.IsNil) - c.Assert(folders, chk.DeepEquals, []string{"/usr/bin/", "/usr/lib64/"}) - c.Assert(files, chk.DeepEquals, []string{}) - - // Things under /usr/bin/ - folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/bin/") - c.Assert(err, chk.IsNil) - c.Assert(folders, chk.DeepEquals, []string(nil)) - c.Assert(files, chk.DeepEquals, []string{"/usr/bin/cat", "/usr/bin/ls"}) -} - -func (s *StorageBlobSuite) TestListBlobsWithMetadata(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - expectMeta := make(map[string]BlobMetadata) - - // Put 4 blobs with metadata - for i := 0; i < 4; i++ { - name := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) - c.Assert(cli.SetBlobMetadata(cnt, name, map[string]string{ - "Foo": name, - "Bar_BAZ": "Waz Qux", - }, nil), chk.IsNil) - expectMeta[name] = BlobMetadata{ - "foo": name, - "bar_baz": "Waz Qux", - } - } - - // Put one more blob with no metadata - blobWithoutMetadata := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blobWithoutMetadata, []byte("Hello, world!")), chk.IsNil) - expectMeta[blobWithoutMetadata] = nil - - // Get ListBlobs with include:"metadata" - resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ - MaxResults: 5, - Include: "metadata"}) + err = blob2.GetProperties(nil) c.Assert(err, chk.IsNil) - 
respBlobs := make(map[string]Blob) - for _, v := range resp.Blobs { - respBlobs[v.Name] = v - } - - // Verify the metadata is as expected - for name := range expectMeta { - c.Check(respBlobs[name].Metadata, chk.DeepEquals, expectMeta[name]) - } + c.Assert(blob2.Properties.ContentLength, chk.Equals, int64(len(contents))) + c.Assert(blob2.Properties.ContentType, chk.Equals, "application/octet-stream") + c.Assert(blob2.Properties.BlobType, chk.Equals, BlobTypeBlock) } // Ensure it's possible to generate a ListBlobs response with // metadata, e.g., for a stub server. func (s *StorageBlobSuite) TestMarshalBlobMetadata(c *chk.C) { buf, err := xml.Marshal(Blob{ - Name: randName(5), + Name: blobName(c), Properties: BlobProperties{}, - Metadata: BlobMetadata{"foo": "baz < waz"}, + Metadata: map[string]string{ + "lol": "baz < waz", + }, }) c.Assert(err, chk.IsNil) - c.Assert(string(buf), chk.Matches, `.*baz < waz.*`) + c.Assert(string(buf), chk.Matches, `.*baz < waz.*`) } -func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { +func (s *StorageBlobSuite) TestGetAndSetBlobMetadata(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + // Get empty metadata + blob1 := cnt.GetBlobReference(blobName(c, "1")) + c.Assert(blob1.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - m, err := cli.GetBlobMetadata(cnt, blob) + err := blob1.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Assert(m, chk.Not(chk.Equals), nil) - c.Assert(len(m), chk.Equals, 0) + c.Assert(blob1.Metadata, chk.HasLen, 0) - mPut := map[string]string{ - "foo": "bar", - "bar_baz": "waz qux", + // Get and set the metadata + blob2 := 
cnt.GetBlobReference(blobName(c, "2")) + c.Assert(blob2.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + metaPut := BlobMetadata{ + "lol": "rofl", + "rofl_baz": "waz qux", } + blob2.Metadata = metaPut - err = cli.SetBlobMetadata(cnt, blob, mPut, nil) + err = blob2.SetMetadata(nil) c.Assert(err, chk.IsNil) - m, err = cli.GetBlobMetadata(cnt, blob) + err = blob2.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Check(m, chk.DeepEquals, mPut) + c.Check(blob2.Metadata, chk.DeepEquals, metaPut) +} - // Case munging +func (s *StorageBlobSuite) TestMetadataCaseMunging(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + b := cnt.GetBlobReference(blobName(c)) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - mPutUpper := map[string]string{ - "Foo": "different bar", - "bar_BAZ": "different waz qux", + // Case munging + metaPutUpper := BlobMetadata{ + "Lol": "different rofl", + "rofl_BAZ": "different waz qux", } - mExpectLower := map[string]string{ - "foo": "different bar", - "bar_baz": "different waz qux", + metaExpectLower := BlobMetadata{ + "lol": "different rofl", + "rofl_baz": "different waz qux", } - err = cli.SetBlobMetadata(cnt, blob, mPutUpper, nil) + b.Metadata = metaPutUpper + err := b.SetMetadata(nil) c.Assert(err, chk.IsNil) - m, err = cli.GetBlobMetadata(cnt, blob) + err = b.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Check(m, chk.DeepEquals, mExpectLower) + c.Check(b.Metadata, chk.DeepEquals, metaExpectLower) } func (s *StorageBlobSuite) TestSetMetadataWithExtraHeaders(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + 
c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - mPut := map[string]string{ - "foo": "bar", - "bar_baz": "waz qux", + meta := BlobMetadata{ + "lol": "rofl", + "rofl_baz": "waz qux", } + b.Metadata = meta - extraHeaders := map[string]string{ - "If-Match": "incorrect-etag", + options := SetBlobMetadataOptions{ + IfMatch: "incorrect-etag", } // Set with incorrect If-Match in extra headers should result in error - err := cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + err := b.SetMetadata(&options) c.Assert(err, chk.NotNil) - props, err := cli.GetBlobProperties(cnt, blob) - extraHeaders = map[string]string{ - "If-Match": props.Etag, - } + err = b.GetProperties(nil) + c.Assert(err, chk.IsNil) // Set with matching If-Match in extra headers should succeed - err = cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + options.IfMatch = b.Properties.Etag + b.Metadata = meta + err = b.SetMetadata(&options) c.Assert(err, chk.IsNil) } func (s *StorageBlobSuite) TestSetBlobProperties(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - mPut := BlobHeaders{ + input := BlobProperties{ CacheControl: "private, max-age=0, no-cache", ContentMD5: "oBATU+oaDduHWbVZLuzIJw==", ContentType: "application/json", ContentEncoding: "gzip", ContentLanguage: "de-DE", } + b.Properties = input - err := cli.SetBlobProperties(cnt, blob, mPut) - c.Assert(err, 
chk.IsNil) - - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - - c.Check(mPut.CacheControl, chk.Equals, props.CacheControl) - c.Check(mPut.ContentType, chk.Equals, props.ContentType) - c.Check(mPut.ContentMD5, chk.Equals, props.ContentMD5) - c.Check(mPut.ContentEncoding, chk.Equals, props.ContentEncoding) - c.Check(mPut.ContentLanguage, chk.Equals, props.ContentLanguage) -} - -func (s *StorageBlobSuite) createContainerPermissions(accessType ContainerAccessType, - timeout int, leaseID string, ID string, canRead bool, - canWrite bool, canDelete bool) ContainerPermissions { - perms := ContainerPermissions{} - perms.AccessOptions.ContainerAccess = accessType - perms.AccessOptions.Timeout = timeout - perms.AccessOptions.LeaseID = leaseID - - if ID != "" { - perms.AccessPolicy.ID = ID - perms.AccessPolicy.StartTime = time.Now() - perms.AccessPolicy.ExpiryTime = time.Now().Add(time.Hour * 10) - perms.AccessPolicy.CanRead = canRead - perms.AccessPolicy.CanWrite = canWrite - perms.AccessPolicy.CanDelete = canDelete - } - - return perms -} - -func (s *StorageBlobSuite) TestSetContainerPermissionsWithTimeoutSuccessfully(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - perms := s.createContainerPermissions(ContainerAccessTypeBlob, 30, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) - - err := cli.SetContainerPermissions(cnt, perms) - c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestSetContainerPermissionsSuccessfully(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) - - err := cli.SetContainerPermissions(cnt, perms) - 
c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestSetThenGetContainerPermissionsSuccessfully(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) - - err := cli.SetContainerPermissions(cnt, perms) - c.Assert(err, chk.IsNil) - - returnedPerms, err := cli.GetContainerPermissions(cnt, 0, "") - c.Assert(err, chk.IsNil) - - // check container permissions itself. - c.Assert(returnedPerms.ContainerAccess, chk.Equals, perms.AccessOptions.ContainerAccess) - - // now check policy set. - c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers, chk.HasLen, 1) - c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].ID, chk.Equals, perms.AccessPolicy.ID) - - // test timestamps down the second - // rounding start/expiry time original perms since the returned perms would have been rounded. - // so need rounded vs rounded. 
- c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.StartTime.Round(time.Second).Format(time.RFC1123), chk.Equals, perms.AccessPolicy.StartTime.Round(time.Second).Format(time.RFC1123)) - c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.ExpiryTime.Round(time.Second).Format(time.RFC1123), chk.Equals, perms.AccessPolicy.ExpiryTime.Round(time.Second).Format(time.RFC1123)) - c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.Permission, chk.Equals, "rwd") -} - -func (s *StorageBlobSuite) TestSetContainerPermissionsOnlySuccessfully(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "", true, true, true) - - err := cli.SetContainerPermissions(cnt, perms) - c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestSetThenGetContainerPermissionsOnlySuccessfully(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "", true, true, true) - - err := cli.SetContainerPermissions(cnt, perms) + err := b.SetProperties(nil) c.Assert(err, chk.IsNil) - returnedPerms, err := cli.GetContainerPermissions(cnt, 0, "") + err = b.GetProperties(nil) c.Assert(err, chk.IsNil) - // check container permissions itself. 
- c.Assert(returnedPerms.ContainerAccess, chk.Equals, perms.AccessOptions.ContainerAccess) - - // now check there are NO policies set - c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers, chk.HasLen, 0) + c.Check(b.Properties.CacheControl, chk.Equals, input.CacheControl) + c.Check(b.Properties.ContentType, chk.Equals, input.ContentType) + c.Check(b.Properties.ContentMD5, chk.Equals, input.ContentMD5) + c.Check(b.Properties.ContentEncoding, chk.Equals, input.ContentEncoding) + c.Check(b.Properties.ContentLanguage, chk.Equals, input.ContentLanguage) } func (s *StorageBlobSuite) TestSnapshotBlob(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, nil) + snapshotTime, err := b.CreateSnapshot(nil) c.Assert(err, chk.IsNil) c.Assert(snapshotTime, chk.NotNil) } func (s *StorageBlobSuite) TestSnapshotBlobWithTimeout(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) - snapshotTime, err := cli.SnapshotBlob(cnt, blob, 30, nil) + options := SnapshotOptions{ + Timeout: 0, + 
} + snapshotTime, err := b.CreateSnapshot(&options) c.Assert(err, chk.IsNil) c.Assert(snapshotTime, chk.NotNil) } func (s *StorageBlobSuite) TestSnapshotBlobWithValidLease(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) // generate lease. - currentLeaseID, err := cli.AcquireLease(cnt, blob, 30, "") + currentLeaseID, err := b.AcquireLease(30, "", nil) c.Assert(err, chk.IsNil) - extraHeaders := map[string]string{ - leaseID: currentLeaseID, + options := SnapshotOptions{ + LeaseID: currentLeaseID, } - - snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, extraHeaders) + snapshotTime, err := b.CreateSnapshot(&options) c.Assert(err, chk.IsNil) c.Assert(snapshotTime, chk.NotNil) } func (s *StorageBlobSuite) TestSnapshotBlobWithInvalidLease(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) // generate lease. 
- _, err := cli.AcquireLease(cnt, blob, 30, "") + leaseID, err := b.AcquireLease(30, "", nil) c.Assert(err, chk.IsNil) - c.Assert(leaseID, chk.NotNil) + c.Assert(leaseID, chk.Not(chk.Equals), "") - extraHeaders := map[string]string{ - leaseID: "718e3c89-da3d-4201-b616-dd794b0bd7c1", + options := SnapshotOptions{ + LeaseID: "GolangRocksOnAzure", } - - snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, extraHeaders) + snapshotTime, err := b.CreateSnapshot(&options) c.Assert(err, chk.NotNil) c.Assert(snapshotTime, chk.IsNil) } -func (s *StorageBlobSuite) TestAcquireLeaseWithNoProposedLeaseID(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - _, err := cli.AcquireLease(cnt, blob, 30, "") - c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestAcquireLeaseWithProposedLeaseID(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - c.Assert(leaseID, chk.Equals, proposedLeaseID) -} - -func (s *StorageBlobSuite) TestAcquireLeaseWithBadProposedLeaseID(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - proposedLeaseID := "badbadbad" - _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.NotNil) -} - -func (s *StorageBlobSuite) TestRenewLeaseSuccessful(c 
*chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - err = cli.RenewLease(cnt, blob, leaseID) - c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestRenewLeaseAgainstNoCurrentLease(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - badLeaseID := "1f812371-a41d-49e6-b123-f4b542e85144" - err := cli.RenewLease(cnt, blob, badLeaseID) - c.Assert(err, chk.NotNil) -} - -func (s *StorageBlobSuite) TestChangeLeaseSuccessful(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - newProposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fbb" - newLeaseID, err := cli.ChangeLease(cnt, blob, leaseID, newProposedLeaseID) - c.Assert(err, chk.IsNil) - c.Assert(newLeaseID, chk.Equals, newProposedLeaseID) -} - -func (s *StorageBlobSuite) TestChangeLeaseNotSuccessfulbadProposedLeaseID(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), 
chk.IsNil) - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - newProposedLeaseID := "1f812371-a41d-49e6-b123-f4b542e" - _, err = cli.ChangeLease(cnt, blob, leaseID, newProposedLeaseID) - c.Assert(err, chk.NotNil) -} - -func (s *StorageBlobSuite) TestReleaseLeaseSuccessful(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - err = cli.ReleaseLease(cnt, blob, leaseID) - c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) TestReleaseLeaseNotSuccessfulBadLeaseID(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - err = cli.ReleaseLease(cnt, blob, "badleaseid") - c.Assert(err, chk.NotNil) -} - -func (s *StorageBlobSuite) TestBreakLeaseSuccessful(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) - - proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" - _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) - c.Assert(err, chk.IsNil) - - _, err = cli.BreakLease(cnt, blob) - c.Assert(err, chk.IsNil) -} - -func (s 
*StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { +func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { cli := getBlobClient(c) - cnt := randContainer() - - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Not(chk.Equals), 0) -} + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) -func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { - cnt := randContainer() - blob := randName(5) body := "0123456789" + c.Assert(b.putSingleBlockBlob([]byte(body)), chk.IsNil) + defer b.Delete(nil) - cli := getBlobClient(c) - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) - defer cli.DeleteContainer(cnt) - - c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) - defer cli.DeleteBlob(cnt, blob, nil) - - // Read 1-3 - for _, r := range []struct { - rangeStr string + cases := []struct { + options GetBlobRangeOptions expected string }{ - {"0-", body}, - {"1-3", body[1 : 3+1]}, - {"3-", body[3:]}, - } { - resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr, nil) + { + options: GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 0, + End: uint64(len(body)), + }, + }, + expected: body, + }, + { + options: GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 1, + End: 3, + }, + }, + expected: body[1 : 3+1], + }, + { + options: GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 3, + End: uint64(len(body)), + }, + }, + expected: body[3:], + }, + } + + // Read 1-3 + for _, r := range cases { + resp, err := b.GetRange(&(r.options)) c.Assert(err, chk.IsNil) blobBody, err := ioutil.ReadAll(resp) c.Assert(err, chk.IsNil) str := 
string(blobBody) c.Assert(str, chk.Equals, r.expected) - } -} - -func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - name := randName(5) - data := randBytes(8888) - c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil) - - body, err := cli.GetBlob(cnt, name) - c.Assert(err, chk.IsNil) - gotData, err := ioutil.ReadAll(body) - body.Close() - - c.Assert(err, chk.IsNil) - c.Assert(gotData, chk.DeepEquals, data) -} -func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - name := randName(5) - data := randBytes(8888) - err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil) - c.Assert(err, chk.Not(chk.IsNil)) - - _, err = cli.GetBlob(cnt, name) - // Upload was incomplete: blob should not have been created. 
- c.Assert(err, chk.Not(chk.IsNil)) -} - -func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - chunk := []byte(randString(1024)) - blockID := base64.StdEncoding.EncodeToString([]byte("foo")) - c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) -} - -func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - chunk := []byte(randString(1024)) - blockID := base64.StdEncoding.EncodeToString([]byte("foo")) - - // Put one block - c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) - defer cli.deleteBlob(cnt, blob, nil) - - // Get committed blocks - committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) - c.Assert(err, chk.IsNil) - - if len(committed.CommittedBlocks) > 0 { - c.Fatal("There are committed blocks") - } - - // Get uncommitted blocks - uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) - c.Assert(err, chk.IsNil) - - c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) - // Commit block list - c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) - - // Get all blocks - all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) - c.Assert(err, chk.IsNil) - c.Assert(len(all.CommittedBlocks), chk.Equals, 1) - c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) - - // Verify the block - thatBlock := all.CommittedBlocks[0] - c.Assert(thatBlock.Name, chk.Equals, blockID) - c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) -} - -func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, 
ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) - - // Verify - blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) - c.Assert(err, chk.IsNil) - c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) - c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) -} - -func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - size := int64(10 * 1024 * 1024) - c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) - - // Verify - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Equals, size) - c.Assert(props.BlobType, chk.Equals, BlobTypePage) -} - -func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) - - chunk1 := []byte(randString(1024)) - chunk2 := []byte(randString(512)) - - // Append chunks - c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1, nil), chk.IsNil) - c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2, nil), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) - out.Close() - - // Overwrite first half of chunk1 - chunk0 := []byte(randString(512)) - 
c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0, nil), chk.IsNil) - - // Verify contents - out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err = ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) -} - -func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) - - // Put 0-2047 - chunk := []byte(randString(2048)) - c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk, nil), chk.IsNil) - - // Clear 512-1023 - c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil, nil), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, "0-2047", nil) - c.Assert(err, chk.IsNil) - contents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - defer out.Close() - c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) -} - -func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - size := int64(10 * 1024 * 1024) // larger than we'll use - c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) - - // Get page ranges on empty blob - out, err := cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 0) - - // Add 0-512 page - c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512)), nil), chk.IsNil) - - 
out, err = cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 1) - - // Add 1024-2048 - c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024)), nil), chk.IsNil) - - out, err = cli.GetPageRanges(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(len(out.PageList), chk.Equals, 2) -} - -func (s *StorageBlobSuite) TestPutAppendBlob(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) - - // Verify - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Equals, int64(0)) - c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) -} - -func (s *StorageBlobSuite) TestPutAppendBlobAppendBlocks(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randName(5) - c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) - - chunk1 := []byte(randString(1024)) - chunk2 := []byte(randString(512)) - - // Append first block - c.Assert(cli.AppendBlock(cnt, blob, chunk1, nil), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, chunk1) - out.Close() - - // Append second block - c.Assert(cli.AppendBlock(cnt, blob, chunk2, nil), chk.IsNil) - - // Verify contents - out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err = ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, 
append(chunk1, chunk2...)) - out.Close() -} - -func deleteTestContainers(cli BlobStorageClient) error { - for { - resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) - if err != nil { - return err - } - if len(resp.Containers) == 0 { - break - } - for _, c := range resp.Containers { - err = cli.DeleteContainer(c.Name) - if err != nil { - return err - } - } + // Was content lenght properly updated...? + c.Assert(b.Properties.ContentLength, chk.Equals, int64(len(r.expected))) } - return nil } -func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { +func (b *Blob) putSingleBlockBlob(chunk []byte) error { if len(chunk) > MaxBlobBlockSize { return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) } - uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) + headers := b.Container.bsc.client.getStandardHeaders() headers["x-ms-blob-type"] = string(BlobTypeBlock) - headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + headers["Content-Length"] = strconv.Itoa(len(chunk)) - resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) if err != nil { return err } return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } -func (s *StorageBlobSuite) TestPutAppendBlobSpecialChars(c *chk.C) { - cli := getBlobClient(c) - cnt := randContainer() - c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) - defer cli.deleteContainer(cnt) - - blob := randNameWithSpecialChars(5) - c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) - - // Verify metadata - props, err := cli.GetBlobProperties(cnt, blob) - c.Assert(err, 
chk.IsNil) - c.Assert(props.ContentLength, chk.Equals, int64(0)) - c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) - - chunk1 := []byte(randString(1024)) - chunk2 := []byte(randString(512)) +func blobName(c *chk.C, extras ...string) string { + return nameGenerator(1024, "blob/", alphanum, c, extras) - // Append first block - c.Assert(cli.AppendBlock(cnt, blob, chunk1, nil), chk.IsNil) - - // Verify contents - out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err := ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, chunk1) - out.Close() - - // Append second block - c.Assert(cli.AppendBlock(cnt, blob, chunk2, nil), chk.IsNil) - - // Verify contents - out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) - c.Assert(err, chk.IsNil) - defer out.Close() - blobContents, err = ioutil.ReadAll(out) - c.Assert(err, chk.IsNil) - c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) - out.Close() } -func randContainer() string { - return testContainerPrefix + randString(32-len(testContainerPrefix)) +func contentWithSpecialChars(n int) string { + name := string(content(n)) + "/" + string(content(n)) + "-._~:?#[]@!$&'()*,;+= " + string(content(n)) + return name } -func randString(n int) string { - if n <= 0 { - panic("negative number") - } - const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" - var bytes = make([]byte, n) - rand.Read(bytes) - for i, b := range bytes { - bytes[i] = alphanum[b%byte(len(alphanum))] +func nameGenerator(maxLen int, prefix, valid string, c *chk.C, extras []string) string { + extra := strings.Join(extras, "") + name := prefix + extra + removeInvalidCharacters(c.TestName(), valid) + if len(name) > maxLen { + return name[:maxLen] } - return string(bytes) + return name } -func randBytes(n int) []byte { - data := make([]byte, n) - if _, err := 
io.ReadFull(rand.Reader, data); err != nil { - panic(err) +func removeInvalidCharacters(unformatted string, valid string) string { + unformatted = strings.ToLower(unformatted) + buffer := bytes.NewBufferString(strconv.Itoa((len(unformatted)))) + runes := []rune(unformatted) + for _, r := range runes { + if strings.ContainsRune(valid, r) { + buffer.WriteRune(r) + } } - return data + return string(buffer.Bytes()) } -func randName(n int) string { - name := randString(n) + "/" + randString(n) - return name +func content(n int) []byte { + buffer := bytes.NewBufferString("") + rep := (n / len(veryLongString)) + 1 + for i := 0; i < rep; i++ { + buffer.WriteString(veryLongString) + } + return buffer.Bytes()[:n] } -func randNameWithSpecialChars(n int) string { - name := randString(n) + "/" + randString(n) + "-._~:?#[]@!$&'()*,;+= " + randString(n) - return name -} +const ( + alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" + alpha = "abcdefghijklmnopqrstuvwxyz" + veryLongString = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer feugiat eleifend scelerisque. Phasellus tempor turpis eget magna pretium, et finibus massa convallis. Donec eget lacinia nibh. Ut ut cursus odio. Quisque id justo interdum, maximus ex a, dapibus leo. Nullam mattis arcu nec justo vehicula pretium. Curabitur fermentum quam ac dolor venenatis, vitae scelerisque ex posuere. Donec ut ante porttitor, ultricies ante ac, pulvinar metus. Nunc suscipit elit gravida dolor facilisis sollicitudin. Fusce ac ultrices libero. Donec erat lectus, hendrerit volutpat nisl quis, porta accumsan nibh. Pellentesque hendrerit nisi id mi porttitor maximus. Phasellus vitae venenatis velit. Quisque id felis nec lacus iaculis porttitor. Maecenas egestas tortor et nulla dapibus varius. In hac habitasse platea dictumst." 
+) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go new file mode 100644 index 000000000000..43173d3a417a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -0,0 +1,106 @@ +package storage + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// GetSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols. +// If old API version is used but no signedIP is passed (ie empty string) then this should still work. +// We only populate the signedIP when it non-empty. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetURL() + ) + canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth) + if err != nil { + return "", err + } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
+ canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedExpiry := expiry.UTC().Format(time.RFC3339) + + //If blob name is missing, resource is a container + signedResource := "c" + if len(b.Name) > 0 { + signedResource = "b" + } + + protocols := "https,http" + if HTTPSOnly { + protocols = "https" + } + stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) + if err != nil { + return "", err + } + + sig := b.Container.bsc.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.Container.bsc.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + if b.Container.bsc.client.apiVersion >= "2015-04-05" { + sasParams.Add("spr", protocols) + if signedIPRange != "" { + sasParams.Add("sip", signedIPRange) + } + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +// GetSASURI creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) { + return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false) +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri_test.go new file mode 100644 index 000000000000..ac2b6b1c586e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri_test.go @@ -0,0 +1,185 @@ +package storage + +import ( + "io/ioutil" + "net/http" + "net/url" + "time" + + chk "gopkg.in/check.v1" +) + +type BlobSASURISuite struct{} + +var _ = chk.Suite(&BlobSASURISuite{}) + +var oldAPIVer = "2013-08-15" +var newerAPIVer = "2015-04-05" + +func (s *BlobSASURISuite) TestGetBlobSASURI(c *chk.C) { + api, err := NewClient("foo", 
dummyMiniStorageKey, DefaultBaseURL, oldAPIVer, true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + cnt := cli.GetContainerReference("container") + b := cnt.GetBlobReference("name") + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container/name", + RawQuery: url.Values{ + "sv": {oldAPIVer}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := b.GetSASURI(expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +//Gets a SASURI for the entire container +func (s *BlobSASURISuite) TestGetBlobSASURIContainer(c *chk.C) { + api, err := NewClient("foo", dummyMiniStorageKey, DefaultBaseURL, oldAPIVer, true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + cnt := cli.GetContainerReference("container") + b := cnt.GetBlobReference("") + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container", + RawQuery: url.Values{ + "sv": {oldAPIVer}, + "sig": {"KMjYyQODKp6uK9EKR3yGhO2M84e1LfoztypU32kHj4s="}, + "sr": {"c"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := b.GetSASURI(expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *BlobSASURISuite) TestGetBlobSASURIWithSignedIPAndProtocolValidAPIVersionPassed(c *chk.C) { + api, err := NewClient("foo", dummyMiniStorageKey, DefaultBaseURL, newerAPIVer, true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + cnt := cli.GetContainerReference("container") + b := 
cnt.GetBlobReference("name") + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "/container/name", + RawQuery: url.Values{ + "sv": {newerAPIVer}, + "sig": {"VBOYJmt89UuBRXrxNzmsCMoC+8PXX2yklV71QcL1BfM="}, + "sr": {"b"}, + "sip": {"127.0.0.1"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + "spr": {"https"}, + }.Encode()} + + u, err := b.GetSASURIWithSignedIPAndProtocol(expiry, "r", "127.0.0.1", true) + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(sasParts.Query(), chk.DeepEquals, expectedParts.Query()) +} + +// Trying to use SignedIP and Protocol but using an older version of the API. +// Should ignore the signedIP/protocol and just use what the older version requires. +func (s *BlobSASURISuite) TestGetBlobSASURIWithSignedIPAndProtocolUsingOldAPIVersion(c *chk.C) { + api, err := NewClient("foo", dummyMiniStorageKey, DefaultBaseURL, oldAPIVer, true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + cnt := cli.GetContainerReference("container") + b := cnt.GetBlobReference("name") + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "/container/name", + RawQuery: url.Values{ + "sv": {oldAPIVer}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := b.GetSASURIWithSignedIPAndProtocol(expiry, "r", "", true) + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *BlobSASURISuite) TestBlobSASURICorrectness(c *chk.C) { + cli := getBlobClient(c) + + if cli.client.usesDummies() { + c.Skip("As GetSASURI result depends on the account key, it is not practical to test it with a dummy key.") + } + + simpleClient := 
&http.Client{} + rec := cli.client.appendRecorder(c) + simpleClient.Transport = rec + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + b := cnt.GetBlobReference(contentWithSpecialChars(5)) + defer cnt.Delete(nil) + + body := content(100) + expiry := fixedTime.UTC().Add(time.Hour) + permissions := "r" + + c.Assert(b.putSingleBlockBlob(body), chk.IsNil) + + sasURI, err := b.GetSASURI(expiry, permissions) + c.Assert(err, chk.IsNil) + + resp, err := simpleClient.Get(sasURI) + c.Assert(err, chk.IsNil) + + blobResp, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + c.Assert(err, chk.IsNil) + + c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) + c.Assert(string(blobResp), chk.Equals, string(body)) + +} + +func (s *BlobSASURISuite) Test_blobSASStringToSign(c *chk.C) { + _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP", "", "") + c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 + + out, err := blobSASStringToSign(oldAPIVer, "CS", "SE", "SP", "", "") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") + + // check format for 2015-04-05 version + out, err = blobSASStringToSign(newerAPIVer, "CS", "SE", "SP", "127.0.0.1", "https,http") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\n/blobCS\n\n127.0.0.1\nhttps,http\n2015-04-05\n\n\n\n\n") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go new file mode 100644 index 000000000000..450b20f96724 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -0,0 +1,95 @@ +package storage + +import ( + "net/http" + "net/url" + "strconv" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. 
+type BlobStorageClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties +func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { + return b.client.getServiceProperties(blobServiceName, b.auth) +} + +// SetServiceProperties sets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties +func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { + return b.client.setServiceProperties(props, blobServiceName, b.auth) +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// GetContainerReference returns a Container object for the specified container name. +func (b *BlobStorageClient) GetContainerReference(name string) *Container { + return &Container{ + bsc: b, + Name: name, + } +} + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + // assign our client to the newly created Container objects + for i := range out.Containers { + out.Containers[i].bsc = &b + } + return &out, err +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go new file mode 100644 index 000000000000..6a180d48d7cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -0,0 +1,240 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. 
+type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 100 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { + return b.CreateBlockBlobFromReader(nil, options) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 256 MiB (but this limit is not +// checked by the SDK). 
To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + + headers["Content-Length"] = "0" + var n int64 + var err error + if blob != nil { + buf := &bytes.Buffer{} + n, err = io.Copy(buf, blob) + if err != nil { + return err + } + blob = buf + headers["Content-Length"] = strconv.FormatInt(n, 10) + } + b.Properties.ContentLength = n + + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockOptions includes the options for a put block operation +type PutBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + ContentMD5 string `header:"Content-MD5"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 100 MiB (but this limit is not +// checked by the SDK). 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { + return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 100 MiB (but this limit is not +// checked by the SDK). +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", size) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockListOptions includes the options for a put block list operation +type PutBlockListOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlockList saves list of blocks to the specified block blob. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List +func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { + params := url.Values{"comp": {"blocklist"}} + blockListXML := prepareBlockListRequest(blocks) + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockListOptions includes the options for a get block list operation +type GetBlockListOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List +func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { + params := url.Values{ + "comp": {"blocklist"}, + "blocklisttype": {string(blockType)}, + } + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out BlockListResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob_test.go new file mode 100644 index 000000000000..9a5f37832456 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob_test.go @@ -0,0 +1,134 @@ +package storage + +import ( + "bytes" + "encoding/base64" + "io/ioutil" + + chk "gopkg.in/check.v1" +) + +type BlockBlobSuite struct{} + +var _ = chk.Suite(&BlockBlobSuite{}) + +func (s *BlockBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + length := 8888 + data := content(length) + err := b.CreateBlockBlobFromReader(bytes.NewReader(data), nil) + c.Assert(err, chk.IsNil) + c.Assert(b.Properties.ContentLength, chk.Equals, int64(length)) + + resp, err := b.Get(nil) + c.Assert(err, chk.IsNil) + gotData, err := ioutil.ReadAll(resp) + defer 
resp.Close() + + c.Assert(err, chk.IsNil) + c.Assert(gotData, chk.DeepEquals, data) +} + +func (s *BlockBlobSuite) TestPutBlock(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + chunk := content(1024) + blockID := base64.StdEncoding.EncodeToString([]byte("lol")) + c.Assert(b.PutBlock(blockID, chunk, nil), chk.IsNil) +} + +func (s *BlockBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + chunk := content(1024) + blockID := base64.StdEncoding.EncodeToString([]byte("lol")) + + // Put one block + c.Assert(b.PutBlock(blockID, chunk, nil), chk.IsNil) + defer b.Delete(nil) + + // Get committed blocks + committed, err := b.GetBlockList(BlockListTypeCommitted, nil) + c.Assert(err, chk.IsNil) + + if len(committed.CommittedBlocks) > 0 { + c.Fatal("There are committed blocks") + } + + // Get uncommitted blocks + uncommitted, err := b.GetBlockList(BlockListTypeUncommitted, nil) + c.Assert(err, chk.IsNil) + + c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) + // Commit block list + c.Assert(b.PutBlockList([]Block{{blockID, BlockStatusUncommitted}}, nil), chk.IsNil) + + // Get all blocks + all, err := b.GetBlockList(BlockListTypeAll, nil) + c.Assert(err, chk.IsNil) + c.Assert(len(all.CommittedBlocks), chk.Equals, 1) + c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) + + // Verify the block + thatBlock := all.CommittedBlocks[0] + c.Assert(thatBlock.Name, chk.Equals, blockID) + c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) +} + +func (s *BlockBlobSuite) TestCreateBlockBlob(c *chk.C) { + cli := getBlobClient(c) + 
rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.CreateBlockBlob(nil), chk.IsNil) + + // Verify + blocks, err := b.GetBlockList(BlockListTypeAll, nil) + c.Assert(err, chk.IsNil) + c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) + c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) +} + +func (s *BlockBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + err := b.GetProperties(nil) + c.Assert(err, chk.IsNil) + c.Assert(b.Properties.ContentLength, chk.Not(chk.Equals), 0) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 77528511a47a..8671e52eb71f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -2,6 +2,7 @@ package storage import ( + "bufio" "bytes" "encoding/base64" "encoding/json" @@ -10,22 +11,27 @@ import ( "fmt" "io" "io/ioutil" + "mime" + "mime/multipart" "net/http" "net/url" "regexp" - "sort" - "strconv" + "runtime" "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" ) const ( - // DefaultBaseURL is the domain name used for storage requests when a - // default client is created. + // DefaultBaseURL is the domain name used for storage requests in the + // public cloud when a default client is created. 
DefaultBaseURL = "core.windows.net" - // DefaultAPIVersion is the Azure Storage API version string used when a + // DefaultAPIVersion is the Azure Storage API version string used when a // basic client is created. - DefaultAPIVersion = "2015-02-21" + DefaultAPIVersion = "2016-05-31" defaultUseHTTPS = true @@ -43,20 +49,75 @@ const ( storageEmulatorBlob = "127.0.0.1:10000" storageEmulatorTable = "127.0.0.1:10002" storageEmulatorQueue = "127.0.0.1:10001" + + userAgentHeader = "User-Agent" + + userDefinedMetadataHeaderPrefix = "x-ms-meta-" ) +var ( + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") +) + +// Sender sends a request +type Sender interface { + Send(*Client, *http.Request) (*http.Response, error) +} + +// DefaultSender is the default sender for the client. It implements +// an automatic retry strategy. +type DefaultSender struct { + RetryAttempts int + RetryDuration time.Duration + ValidStatusCodes []int + attempts int // used for testing +} + +// Send is the default retry strategy in the client +func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { + b := []byte{} + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return resp, err + } + } + + for attempts := 0; attempts < ds.RetryAttempts; attempts++ { + if len(b) > 0 { + req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + } + resp, err = c.HTTPClient.Do(req) + if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { + return resp, err + } + autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) + ds.attempts = attempts + } + ds.attempts++ + return resp, err +} + // Client is the object that needs to be constructed to perform // operations on the storage account. type Client struct { // HTTPClient is the http.Client used to initiate API - // requests. If it is nil, http.DefaultClient is used. + // requests. http.DefaultClient is used when creating a + // client. 
HTTPClient *http.Client - accountName string - accountKey []byte - useHTTPS bool - baseURL string - apiVersion string + // Sender is an interface that sends the request. Clients are + // created with a DefaultSender. The DefaultSender has an + // automatic retry strategy built in. The Sender can be customized. + Sender Sender + + accountName string + accountKey []byte + useHTTPS bool + UseSharedKeyLite bool + baseURL string + apiVersion string + userAgent string } type storageResponse struct { @@ -67,7 +128,7 @@ type storageResponse struct { type odataResponse struct { storageResponse - odata odataErrorMessage + odata odataErrorWrapper } // AzureStorageServiceError contains fields of the error response from @@ -80,22 +141,25 @@ type AzureStorageServiceError struct { QueryParameterName string `xml:"QueryParameterName"` QueryParameterValue string `xml:"QueryParameterValue"` Reason string `xml:"Reason"` + Lang string StatusCode int RequestID string + Date string + APIVersion string } -type odataErrorMessageMessage struct { +type odataErrorMessage struct { Lang string `json:"lang"` Value string `json:"value"` } -type odataErrorMessageInternal struct { - Code string `json:"code"` - Message odataErrorMessageMessage `json:"message"` +type odataError struct { + Code string `json:"code"` + Message odataErrorMessage `json:"message"` } -type odataErrorMessage struct { - Err odataErrorMessageInternal `json:"odata.error"` +type odataErrorWrapper struct { + Err odataError `json:"odata.error"` } // UnexpectedStatusCodeError is returned when a storage service responds with neither an error @@ -128,7 +192,15 @@ func NewBasicClient(accountName, accountKey string) (Client, error) { return NewEmulatorClient() } return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) +} +// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and +// key in the referenced cloud. 
+func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS) } //NewEmulatorClient contructs a Client intended to only work with Azure @@ -142,8 +214,8 @@ func NewEmulatorClient() (Client, error) { // storage endpoint than Azure Public Cloud. func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { var c Client - if accountName == "" { - return c, fmt.Errorf("azure: account name required") + if !IsValidStorageAccount(accountName) { + return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName) } else if accountKey == "" { return c, fmt.Errorf("azure: account key required") } else if blobServiceBaseURL == "" { @@ -155,16 +227,67 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u return c, fmt.Errorf("azure: malformed storage account key: %v", err) } - return Client{ - accountName: accountName, - accountKey: key, - useHTTPS: useHTTPS, - baseURL: blobServiceBaseURL, - apiVersion: apiVersion, - }, nil + c = Client{ + HTTPClient: http.DefaultClient, + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + UseSharedKeyLite: false, + Sender: &DefaultSender{ + RetryAttempts: 5, + ValidStatusCodes: []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + }, + RetryDuration: time.Second * 5, + }, + } + c.userAgent = c.getDefaultUserAgent() + return c, nil } -func (c Client) getBaseURL(service string) string { +// IsValidStorageAccount checks if the 
storage account name is valid. +// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account +func IsValidStorageAccount(account string) bool { + return validStorageAccount.MatchString(account) +} + +func (c Client) getDefaultUserAgent() string { + return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + sdkVersion, + c.apiVersion, + ) +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent) +} + +// protectUserAgent is used in funcs that include extraheaders as a parameter. +// It prevents the User-Agent header to be overwritten, instead if it happens to +// be present, it gets added to the current User-Agent. Use it before getStandardHeaders +func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string { + if v, ok := extraheaders[userAgentHeader]; ok { + c.AddToUserAgent(v) + delete(extraheaders, userAgentHeader) + } + return extraheaders +} + +func (c Client) getBaseURL(service string) *url.URL { scheme := "http" if c.useHTTPS { scheme = "https" @@ -183,18 +306,14 @@ func (c Client) getBaseURL(service string) string { host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) } - u := &url.URL{ + return &url.URL{ Scheme: scheme, - Host: host} - return u.String() + Host: host, + } } func (c Client) getEndpoint(service, path string, params url.Values) string { - u, err := url.Parse(c.getBaseURL(service)) - if err != nil { - // really should not be happening - panic(err) - } + u := c.getBaseURL(service) // API doesn't accept path segments not starting with '/' if !strings.HasPrefix(path, "/") { @@ -213,181 +332,69 @@ func (c Client) getEndpoint(service, path string, params url.Values) string { 
// GetBlobService returns a BlobStorageClient which can operate on the blob // service of the storage account. func (c Client) GetBlobService() BlobStorageClient { - return BlobStorageClient{c} + b := BlobStorageClient{ + client: c, + } + b.client.AddToUserAgent(blobServiceName) + b.auth = sharedKey + if c.UseSharedKeyLite { + b.auth = sharedKeyLite + } + return b } // GetQueueService returns a QueueServiceClient which can operate on the queue // service of the storage account. func (c Client) GetQueueService() QueueServiceClient { - return QueueServiceClient{c} + q := QueueServiceClient{ + client: c, + } + q.client.AddToUserAgent(queueServiceName) + q.auth = sharedKey + if c.UseSharedKeyLite { + q.auth = sharedKeyLite + } + return q } // GetTableService returns a TableServiceClient which can operate on the table // service of the storage account. func (c Client) GetTableService() TableServiceClient { - return TableServiceClient{c} + t := TableServiceClient{ + client: c, + } + t.client.AddToUserAgent(tableServiceName) + t.auth = sharedKeyForTable + if c.UseSharedKeyLite { + t.auth = sharedKeyLiteForTable + } + return t } // GetFileService returns a FileServiceClient which can operate on the file // service of the storage account. 
func (c Client) GetFileService() FileServiceClient { - return FileServiceClient{c} -} - -func (c Client) createAuthorizationHeader(canonicalizedString string) string { - signature := c.computeHmac256(canonicalizedString) - return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) -} - -func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { - canonicalizedResource, err := c.buildCanonicalizedResource(url) - if err != nil { - return "", err + f := FileServiceClient{ + client: c, } - - canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) - return c.createAuthorizationHeader(canonicalizedString), nil + f.client.AddToUserAgent(fileServiceName) + f.auth = sharedKey + if c.UseSharedKeyLite { + f.auth = sharedKeyLite + } + return f } func (c Client) getStandardHeaders() map[string]string { return map[string]string{ - "x-ms-version": c.apiVersion, - "x-ms-date": currentTimeRfc1123Formatted(), - } -} - -func (c Client) getCanonicalizedAccountName() string { - // since we may be trying to access a secondary storage account, we need to - // remove the -secondary part of the storage name - return strings.TrimSuffix(c.accountName, "-secondary") -} - -func (c Client) buildCanonicalizedHeader(headers map[string]string) string { - cm := make(map[string]string) - - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - match, _ := regexp.MatchString("x-ms-", headerName) - if match { - cm[headerName] = v - } - } - - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - - sort.Strings(keys) - - ch := "" - - for i, key := range keys { - if i == len(keys)-1 { - ch += fmt.Sprintf("%s:%s", key, cm[key]) - } else { - ch += fmt.Sprintf("%s:%s\n", key, cm[key]) - } - } - return ch -} - -func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { - errMsg := 
"buildCanonicalizedResourceTable error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - cr += u.EscapedPath() - } - - return cr, nil -} - -func (c Client) buildCanonicalizedResource(uri string) (string, error) { - errMsg := "buildCanonicalizedResource error: %s" - u, err := url.Parse(uri) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) - } - - cr := "/" + c.getCanonicalizedAccountName() - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr += u.EscapedPath() - } - - params, err := url.ParseQuery(u.RawQuery) - if err != nil { - return "", fmt.Errorf(errMsg, err.Error()) + userAgentHeader: c.userAgent, + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), } - - if len(params) > 0 { - cr += "\n" - keys := make([]string, 0, len(params)) - for key := range params { - keys = append(keys, key) - } - - sort.Strings(keys) - - for i, key := range keys { - if len(params[key]) > 1 { - sort.Strings(params[key]) - } - - if i == len(keys)-1 { - cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) - } else { - cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) - } - } - } - - return cr, nil } -func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { - contentLength := headers["Content-Length"] - if contentLength == "0" { - contentLength = "" - } - canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", - verb, - headers["Content-Encoding"], - headers["Content-Language"], - contentLength, - headers["Content-MD5"], - headers["Content-Type"], - headers["Date"], - headers["If-Modified-Since"], - 
headers["If-Match"], - headers["If-None-Match"], - headers["If-Unmodified-Since"], - headers["Range"], - c.buildCanonicalizedHeader(headers), - canonicalizedResource) - - return canonicalizedString -} - -func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { - authHeader, err := c.getAuthorizationHeader(verb, url, headers) - if err != nil { - return nil, err - } - headers["Authorization"] = authHeader +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) if err != nil { return nil, err } @@ -397,45 +404,44 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } - if clstr, ok := headers["Content-Length"]; ok { - // content length header is being signed, but completely ignored by golang. 
- // instead we have to use the ContentLength property on the request struct - // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and - // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) - req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) - if err != nil { - return nil, err - } - } for k, v := range headers { req.Header.Add(k, v) } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - resp, err := httpClient.Do(req) + resp, err := c.Sender.Send(&c, req) if err != nil { return nil, err } - statusCode := resp.StatusCode - if statusCode >= 400 && statusCode <= 505 { + if resp.StatusCode >= 400 && resp.StatusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { return nil, err } + requestID, date, version := getDebugHeaders(resp.Header) if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) } else { + storageErr := AzureStorageServiceError{ + StatusCode: resp.StatusCode, + RequestID: requestID, + Date: date, + APIVersion: version, + } // response contains storage service error object, unmarshal - storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) - if err != nil { // error unmarshaling the error response - err = errIn + if resp.Header.Get("Content-Type") == "application/xml" { + errIn := serviceErrFromXML(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } else { + errIn := serviceErrFromJSON(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } } err = storageErr } @@ -452,20 +458,20 @@ func (c Client) exec(verb, url string, 
headers map[string]string, body io.Reader body: resp.Body}, nil } -func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { +func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, nil, nil, err + } + req, err := http.NewRequest(verb, url, body) for k, v := range headers { req.Header.Add(k, v) } - httpClient := c.HTTPClient - if httpClient == nil { - httpClient = http.DefaultClient - } - - resp, err := httpClient.Do(req) + resp, err := c.Sender.Send(&c, req) if err != nil { - return nil, err + return nil, nil, nil, err } respToRet := &odataResponse{} @@ -476,68 +482,155 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo statusCode := resp.StatusCode if statusCode >= 400 && statusCode <= 505 { var respBody []byte - respBody, err = readResponseBody(resp) + respBody, err = readAndCloseBody(resp.Body) if err != nil { - return nil, err + return nil, nil, nil, err } + requestID, date, version := getDebugHeaders(resp.Header) if len(respBody) == 0 { - // no error in response body - err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode) - return respToRet, err + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + return respToRet, req, resp, err } // try unmarshal as odata.error json err = json.Unmarshal(respBody, &respToRet.odata) - return respToRet, err + } + + return respToRet, req, resp, err +} + +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + 
return respToRet, err +} + +func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + // execute common query, get back generated request, response etc... for more processing. + respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + if err != nil { + return nil, err + } + + // return the OData in the case of executing batch commands. + // In this case we need to read the outer batch boundary and contents. + // Then we read the changeset information within the batch + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, err + } + + // outer multipart body + _, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0]) + if err != nil { + return nil, err + } + + // batch details. + batchBoundary := batchHeader["boundary"] + batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody) + if err != nil { + return nil, err + } + + // changeset details. 
+ err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary) + if err != nil { + return nil, err } return respToRet, nil } -func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { - can, err := c.buildCanonicalizedResourceTable(url) +func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error { + changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary) + changesetPart, err := changesetMultiReader.NextPart() + if err != nil { + return err + } + changesetPartBufioReader := bufio.NewReader(changesetPart) + changesetResp, err := http.ReadResponse(changesetPartBufioReader, req) if err != nil { - return "", err + return err + } + + if changesetResp.StatusCode != http.StatusNoContent { + changesetBody, err := readAndCloseBody(changesetResp.Body) + err = json.Unmarshal(changesetBody, &respToRet.odata) + if err != nil { + return err + } + respToRet.statusCode = changesetResp.StatusCode } - strToSign := headers["x-ms-date"] + "\n" + can - hmac := c.computeHmac256(strToSign) - return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil + return nil } -func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { - var err error - headers["Authorization"], err = c.createSharedKeyLite(url, headers) +func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) { + respBodyString := string(respBody) + respBodyReader := strings.NewReader(respBodyString) + + // reading batchresponse + batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary) + batchPart, err := batchMultiReader.NextPart() if err != nil { - return nil, err + return nil, "", err } + batchPartBufioReader := bufio.NewReader(batchPart) - return c.execInternalJSON(verb, url, headers, body) + _, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type")) + if err != nil 
{ + return nil, "", err + } + changesetBoundary := changesetHeader["boundary"] + return batchPartBufioReader, changesetBoundary, nil } -func readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - out, err := ioutil.ReadAll(resp.Body) +func readAndCloseBody(body io.ReadCloser) ([]byte, error) { + defer body.Close() + out, err := ioutil.ReadAll(body) if err == io.EOF { err = nil } return out, err } -func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { - var storageErr AzureStorageServiceError - if err := xml.Unmarshal(body, &storageErr); err != nil { - return storageErr, err +func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error { + if err := xml.Unmarshal(body, storageErr); err != nil { + storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. Body: %v.", err, string(body)) + return err + } + return nil +} + +func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error { + odataError := odataErrorWrapper{} + if err := json.Unmarshal(body, &odataError); err != nil { + storageErr.Message = fmt.Sprintf("Response body could no be unmarshaled: %v. 
Body: %v.", err, string(body)) + return err + } + storageErr.Code = odataError.Err.Code + storageErr.Message = odataError.Err.Message.Value + storageErr.Lang = odataError.Err.Message.Lang + return nil +} + +func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { + return AzureStorageServiceError{ + StatusCode: code, + Code: status, + RequestID: requestID, + Date: date, + APIVersion: version, + Message: "no response body was available for error status code", } - storageErr.StatusCode = statusCode - storageErr.RequestID = requestID - return storageErr, nil } func (e AzureStorageServiceError) Error() string { - return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", - e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) } // checkRespCode returns UnexpectedStatusError if the given response code is not @@ -550,3 +643,18 @@ func checkRespCode(respCode int, allowed []int) error { } return UnexpectedStatusCodeError{allowed, respCode} } + +func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { + metadata = c.protectUserAgent(metadata) + for k, v := range metadata { + h[userDefinedMetadataHeaderPrefix+k] = v + } + return h +} + +func getDebugHeaders(h http.Header) (requestID, date, version string) { + requestID = h.Get("x-ms-request-id") + version = h.Get("x-ms-version") + date = h.Get("Date") + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go 
b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go index 038299fd942f..3124ab2bbbde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go @@ -1,11 +1,24 @@ package storage import ( + "bytes" "encoding/base64" + "io/ioutil" + "math" + "net/http" "net/url" "os" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" "testing" + "time" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/dnaeon/go-vcr/cassette" + "github.com/dnaeon/go-vcr/recorder" chk "gopkg.in/check.v1" ) @@ -17,18 +30,163 @@ type StorageClientSuite struct{} var _ = chk.Suite(&StorageClientSuite{}) // getBasicClient returns a test client from storage credentials in the env -func getBasicClient(c *chk.C) Client { +func getBasicClient(c *chk.C) *Client { name := os.Getenv("ACCOUNT_NAME") if name == "" { - c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") + name = dummyStorageAccount } key := os.Getenv("ACCOUNT_KEY") if key == "" { - c.Fatal("ACCOUNT_KEY not set") + key = dummyMiniStorageKey } cli, err := NewBasicClient(name, key) c.Assert(err, chk.IsNil) - return cli + + return &cli +} + +func (client *Client) appendRecorder(c *chk.C) *recorder.Recorder { + tests := strings.Split(c.TestName(), ".") + path := filepath.Join(recordingsFolder, tests[0], tests[1]) + rec, err := recorder.New(path) + c.Assert(err, chk.IsNil) + client.HTTPClient = &http.Client{ + Transport: rec, + } + rec.SetMatcher(func(r *http.Request, i cassette.Request) bool { + return compareMethods(r, i) && + compareURLs(r, i) && + compareHeaders(r, i) && + compareBodies(r, i) + }) + return rec +} + +func (client *Client) usesDummies() bool { + key, err := base64.StdEncoding.DecodeString(dummyMiniStorageKey) + if err != nil { + return false + } + if string(client.accountKey) == string(key) && + client.accountName == dummyStorageAccount { + return true + } + return false +} + +func 
compareMethods(r *http.Request, i cassette.Request) bool { + return r.Method == i.Method +} + +func compareURLs(r *http.Request, i cassette.Request) bool { + newURL := modifyURL(r.URL) + return newURL.String() == i.URL +} + +func modifyURL(url *url.URL) *url.URL { + // The URL host looks like this... + // accountname.service.storageEndpointSuffix + parts := strings.Split(url.Host, ".") + // parts[0] corresponds to the storage account name, so it can be (almost) any string + // parts[1] corresponds to the service name (table, blob, etc.). + if !(parts[1] == blobServiceName || + parts[1] == tableServiceName || + parts[1] == queueServiceName || + parts[1] == fileServiceName) { + return nil + } + // The rest of the host depends on which Azure cloud is used + storageEndpointSuffix := strings.Join(parts[2:], ".") + if !(storageEndpointSuffix == azure.PublicCloud.StorageEndpointSuffix || + storageEndpointSuffix == azure.USGovernmentCloud.StorageEndpointSuffix || + storageEndpointSuffix == azure.ChinaCloud.StorageEndpointSuffix || + storageEndpointSuffix == azure.GermanCloud.StorageEndpointSuffix) { + return nil + } + + host := dummyStorageAccount + "." + parts[1] + "." + azure.PublicCloud.StorageEndpointSuffix + newURL := url + newURL.Host = host + return newURL +} + +func compareHeaders(r *http.Request, i cassette.Request) bool { + requestHeaders := r.Header + cassetteHeaders := i.Headers + // Some headers shall not be compared... 
+ requestHeaders.Del("User-Agent") + requestHeaders.Del("Authorization") + requestHeaders.Del("X-Ms-Date") + + cassetteHeaders.Del("User-Agent") + cassetteHeaders.Del("Authorization") + cassetteHeaders.Del("X-Ms-Date") + + srcURLstr := requestHeaders.Get("X-Ms-Copy-Source") + if srcURLstr != "" { + srcURL, err := url.Parse(srcURLstr) + if err != nil { + return false + } + modifiedURL := modifyURL(srcURL) + requestHeaders.Set("X-Ms-Copy-Source", modifiedURL.String()) + } + + // Do not compare the complete Content-Type header in table batch requests + if isBatchOp(r.URL.String()) { + // They all start like this, but then they have a UUID... + ctPrefixBatch := "multipart/mixed; boundary=batch_" + contentTypeRequest := requestHeaders.Get("Content-Type") + contentTypeCassette := cassetteHeaders.Get("Content-Type") + if !(strings.HasPrefix(contentTypeRequest, ctPrefixBatch) && + strings.HasPrefix(contentTypeCassette, ctPrefixBatch)) { + return false + } + requestHeaders.Del("Content-Type") + cassetteHeaders.Del("Content-Type") + } + + return reflect.DeepEqual(requestHeaders, cassetteHeaders) +} + +func compareBodies(r *http.Request, i cassette.Request) bool { + body := bytes.Buffer{} + if r.Body != nil { + _, err := body.ReadFrom(r.Body) + if err != nil { + return false + } + r.Body = ioutil.NopCloser(&body) + } + // Comparing bodies in table batch operations is trickier, because the bodies include UUIDs + if isBatchOp(r.URL.String()) { + return compareBatchBodies(body.String(), i.Body) + } + return body.String() == i.Body +} + +func compareBatchBodies(rBody, cBody string) bool { + // UUIDs in the batch body look like this... 
+ // 2d7f2323-1e42-11e7-8c6c-6451064d81e8 + exp, err := regexp.Compile("[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}") + if err != nil { + return false + } + rBody = replaceStorageAccount(replaceUUIDs(rBody, exp)) + cBody = replaceUUIDs(cBody, exp) + return rBody == cBody +} + +func replaceUUIDs(body string, exp *regexp.Regexp) string { + indexes := exp.FindAllStringIndex(body, -1) + for _, pair := range indexes { + body = strings.Replace(body, body[pair[0]:pair[1]], "00000000-0000-0000-0000-000000000000", -1) + } + return body +} + +func isBatchOp(url string) bool { + return url == "https://golangrocksonazure.table.core.windows.net/$batch" } //getEmulatorClient returns a test client for Azure Storeage Emulator @@ -47,25 +205,44 @@ func (s *StorageClientSuite) TestNewEmulatorClient(c *chk.C) { c.Assert(cli.accountKey, chk.DeepEquals, expectedKey) } +func (s *StorageClientSuite) TestIsValidStorageAccount(c *chk.C) { + type test struct { + account string + expected bool + } + testCases := []test{ + {"name1", true}, + {"Name2", false}, + {"reallyLongName1234567891011", false}, + {"", false}, + {"concated&name", false}, + {"formatted name", false}, + } + + for _, tc := range testCases { + c.Assert(IsValidStorageAccount(tc.account), chk.Equals, tc.expected) + } +} + func (s *StorageClientSuite) TestMalformedKeyError(c *chk.C) { - _, err := NewBasicClient("foo", "malformed") + _, err := NewBasicClient(dummyStorageAccount, "malformed") c.Assert(err, chk.ErrorMatches, "azure: malformed storage account key: .*") } func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, dummyMiniStorageKey) c.Assert(err, chk.IsNil) c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) c.Assert(err, chk.IsNil) - c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") + c.Assert(cli.getBaseURL("table").String(), chk.Equals, 
"https://golangrocksonazure.table.core.windows.net") } func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { apiVersion := "2015-01-01" // a non existing one - cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) + cli, err := NewClient(dummyStorageAccount, dummyMiniStorageKey, "core.chinacloudapi.cn", apiVersion, false) c.Assert(err, chk.IsNil) c.Assert(cli.apiVersion, chk.Equals, apiVersion) - c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") + c.Assert(cli.getBaseURL("table").String(), chk.Equals, "http://golangrocksonazure.table.core.chinacloudapi.cn") } func (s *StorageClientSuite) TestGetBaseURL_StorageEmulator(c *chk.C) { @@ -80,42 +257,42 @@ func (s *StorageClientSuite) TestGetBaseURL_StorageEmulator(c *chk.C) { } for _, i := range tests { baseURL := cli.getBaseURL(i.service) - c.Assert(baseURL, chk.Equals, i.expected) + c.Assert(baseURL.String(), chk.Equals, i.expected) } } func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) output := cli.getEndpoint(blobServiceName, "", url.Values{}) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") + c.Assert(output, chk.Equals, "https://golangrocksonazure.blob.core.windows.net/") } func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) output := cli.getEndpoint(blobServiceName, "path", url.Values{}) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") + c.Assert(output, chk.Equals, "https://golangrocksonazure.blob.core.windows.net/path") } func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) 
params := url.Values{} params.Set("a", "b") params.Set("c", "d") output := cli.getEndpoint(blobServiceName, "", params) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") + c.Assert(output, chk.Equals, "https://golangrocksonazure.blob.core.windows.net/?a=b&c=d") } func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) params := url.Values{} params.Set("a", "b") params.Set("c", "d") output := cli.getEndpoint(blobServiceName, "path", params) - c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") + c.Assert(output, chk.Equals, "https://golangrocksonazure.blob.core.windows.net/path?a=b&c=d") } func (s *StorageClientSuite) TestGetEndpoint_StorageEmulator(c *chk.C) { @@ -135,97 +312,178 @@ func (s *StorageClientSuite) TestGetEndpoint_StorageEmulator(c *chk.C) { } func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) headers := cli.getStandardHeaders() - c.Assert(len(headers), chk.Equals, 2) + c.Assert(len(headers), chk.Equals, 3) c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion) if _, ok := headers["x-ms-date"]; !ok { c.Fatal("Missing date header") } + c.Assert(headers[userAgentHeader], chk.Equals, cli.getDefaultUserAgent()) } -func (s *StorageClientSuite) Test_buildCanonicalizedResourceTable(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) +func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { + // attempt to delete nonexisting resources + cli := getBasicClient(c) + rec := cli.appendRecorder(c) + defer rec.Stop() + + // XML response + blobCli := cli.GetBlobService() + cnt := blobCli.GetContainerReference(containerName(c)) + err := cnt.Delete(nil) + c.Assert(err, chk.NotNil) - type test 
struct{ url, expected string } - tests := []test{ - {"https://foo.table.core.windows.net/mytable", "/foo/mytable"}, - {"https://foo.table.core.windows.net/mytable(PartitionKey='pkey',RowKey='rowkey%3D')", "/foo/mytable(PartitionKey='pkey',RowKey='rowkey%3D')"}, - } + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ContainerNotFound") + c.Assert(v.RequestID, chk.Not(chk.Equals), "") + c.Assert(v.Date, chk.Not(chk.Equals), "") + c.Assert(v.APIVersion, chk.Not(chk.Equals), "") + + // JSON response + tableCli := cli.GetTableService() + table := tableCli.GetTableReference(tableName(c)) + err = table.Delete(30, nil) + c.Assert(err, chk.NotNil) - for _, i := range tests { - out, err := cli.buildCanonicalizedResourceTable(i.url) - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, i.expected) - } + v, ok = err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ResourceNotFound") + c.Assert(v.RequestID, chk.Not(chk.Equals), "") + c.Assert(v.Date, chk.Not(chk.Equals), "") + c.Assert(v.APIVersion, chk.Not(chk.Equals), "") } -func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) +func (s *StorageClientSuite) TestReturnsStorageServiceError_withoutResponseBody(c *chk.C) { + // HEAD on non-existing blob + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - type test struct{ url, expected string } - tests := []test{ - {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, - {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, - {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, - {"https://foo.blob.core.windows.net/cnt/bl ob", "/foo/cnt/bl%20ob"}, - {"https://foo.blob.core.windows.net/c nt/blob", "/foo/c%20nt/blob"}, - 
{"https://foo.blob.core.windows.net/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A blob", "/foo/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A%20blob"}, - {"https://foo.blob.core.windows.net/cnt/blob-._~:,@;+=blob", "/foo/cnt/blob-._~:,@;+=blob"}, - {"https://foo.blob.core.windows.net/c nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob", "/foo/c%20nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob"}, - } + cnt := cli.GetContainerReference("non-existing-container") + b := cnt.GetBlobReference("non-existing-blob") + err := b.GetProperties(nil) - for _, i := range tests { - out, err := cli.buildCanonicalizedResource(i.url) - c.Assert(err, chk.IsNil) - c.Assert(out, chk.Equals, i.expected) - } + c.Assert(err, chk.NotNil) + c.Assert(err, chk.FitsTypeOf, AzureStorageServiceError{}) + + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, http.StatusNotFound) + c.Assert(v.Code, chk.Equals, "404 The specified container does not exist.") + c.Assert(v.RequestID, chk.Not(chk.Equals), "") + c.Assert(v.Message, chk.Equals, "no response body was available for error status code") } -func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { - cli, err := NewBasicClient("foo", "YmFy") +func (s *StorageClientSuite) Test_createServiceClients(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") c.Assert(err, chk.IsNil) - type test struct { - headers map[string]string - expected string - } - tests := []test{ - {map[string]string{}, ""}, - {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, - {map[string]string{"foo:": "bar"}, ""}, - {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, - {map[string]string{ - "x-ms-version": "9999-99-99", - "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + ua := cli.getDefaultUserAgent() - for _, i := range tests { - c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) - } + headers := 
cli.getStandardHeaders() + c.Assert(headers[userAgentHeader], chk.Equals, ua) + c.Assert(cli.userAgent, chk.Equals, ua) + + b := cli.GetBlobService() + c.Assert(b.client.userAgent, chk.Equals, ua+" "+blobServiceName) + c.Assert(cli.userAgent, chk.Equals, ua) + + t := cli.GetTableService() + c.Assert(t.client.userAgent, chk.Equals, ua+" "+tableServiceName) + c.Assert(cli.userAgent, chk.Equals, ua) + + q := cli.GetQueueService() + c.Assert(q.client.userAgent, chk.Equals, ua+" "+queueServiceName) + c.Assert(cli.userAgent, chk.Equals, ua) + + f := cli.GetFileService() + c.Assert(f.client.userAgent, chk.Equals, ua+" "+fileServiceName) + c.Assert(cli.userAgent, chk.Equals, ua) } -func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { - // attempt to delete a nonexisting container - _, err := getBlobClient(c).deleteContainer(randContainer()) +func (s *StorageClientSuite) TestAddToUserAgent(c *chk.C) { + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") + c.Assert(err, chk.IsNil) + + ua := cli.getDefaultUserAgent() + + err = cli.AddToUserAgent("rofl") + c.Assert(err, chk.IsNil) + c.Assert(cli.userAgent, chk.Equals, ua+" rofl") + + err = cli.AddToUserAgent("") c.Assert(err, chk.NotNil) +} - v, ok := err.(AzureStorageServiceError) - c.Check(ok, chk.Equals, true) - c.Assert(v.StatusCode, chk.Equals, 404) - c.Assert(v.Code, chk.Equals, "ContainerNotFound") - c.Assert(v.Code, chk.Not(chk.Equals), "") +func (s *StorageClientSuite) Test_protectUserAgent(c *chk.C) { + extraheaders := map[string]string{ + "1": "one", + "2": "two", + "3": "three", + userAgentHeader: "four", + } + + cli, err := NewBasicClient(dummyStorageAccount, "YmFy") + c.Assert(err, chk.IsNil) + + ua := cli.getDefaultUserAgent() + + got := cli.protectUserAgent(extraheaders) + c.Assert(cli.userAgent, chk.Equals, ua+" four") + c.Assert(got, chk.HasLen, 3) + c.Assert(got, chk.DeepEquals, map[string]string{ + "1": "one", + "2": "two", + "3": "three", + }) } -func (s *StorageClientSuite) 
Test_createAuthorizationHeader(c *chk.C) { - key := base64.StdEncoding.EncodeToString([]byte("bar")) - cli, err := NewBasicClient("foo", key) +func (s *StorageClientSuite) Test_doRetry(c *chk.C) { + cli := getBasicClient(c) + rec := cli.appendRecorder(c) + defer rec.Stop() + + // Prepare request that will fail with 404 (delete non extising table) + uri, err := url.Parse(cli.getEndpoint(tableServiceName, "(retry)", url.Values{"timeout": {strconv.Itoa(30)}})) c.Assert(err, chk.IsNil) + req := http.Request{ + Method: http.MethodDelete, + URL: uri, + Header: http.Header{ + "Accept": {"application/json;odata=nometadata"}, + "Prefer": {"return-no-content"}, + "X-Ms-Version": {"2016-05-31"}, + }, + } - canonicalizedString := `foobarzoo` - expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` - c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) + ds, ok := cli.Sender.(*DefaultSender) + c.Assert(ok, chk.Equals, true) + // Modify sender so it retries quickly + ds.RetryAttempts = 3 + ds.RetryDuration = time.Second + // include 404 as a valid status code for retries + ds.ValidStatusCodes = []int{http.StatusNotFound} + cli.Sender = ds + + now := time.Now() + resp, err := cli.Sender.Send(cli, &req) + afterRetries := time.Since(now) + c.Assert(err, chk.IsNil) + c.Assert(resp.StatusCode, chk.Equals, http.StatusNotFound) + + // Was it the correct amount of retries... ? + c.Assert(cli.Sender.(*DefaultSender).attempts, chk.Equals, cli.Sender.(*DefaultSender).RetryAttempts) + // What about time... ? 
+ // Note, seconds are rounded + sum := 0 + for i := 0; i < ds.RetryAttempts; i++ { + sum += int(ds.RetryDuration.Seconds() * math.Pow(2, float64(i))) // same formula used in autorest.DelayForBackoff + } + c.Assert(int(afterRetries.Seconds()), chk.Equals, sum) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go new file mode 100644 index 000000000000..c2c9c055b562 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -0,0 +1,453 @@ +package storage + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// Container represents an Azure container. +type Container struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata map[string]string +} + +func (c *Container) buildPath() string { + return fmt.Sprintf("/%s", c.Name) +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// BlobListResponse contains the response fields from ListBlobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// IncludeBlobDataset has options to include in a list blobs operation +type IncludeBlobDataset struct { + Snapshots bool + Metadata bool + UncommittedBlobs bool + Copy bool +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include *IncludeBlobDataset + MaxResults uint + Timeout uint + RequestID string +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != nil { + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func addString(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets +} + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessPolicy represents each access policy in the container ACL. 
+type ContainerAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions represents the container ACLs. +type ContainerPermissions struct { + AccessType ContainerAccessType + AccessPolicies []ContainerAccessPolicy +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// GetBlobReference returns a Blob object for the specified blob name. +func (c *Container) GetBlobReference(name string) *Blob { + return &Blob{ + Container: c, + Name: name, + } +} + +// CreateContainerOptions includes the options for a create container operation +type CreateContainerOptions struct { + Timeout uint + Access ContainerAccessType `header:"x-ms-blob-public-access"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Create creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container +func (c *Container) Create(options *CreateContainerOptions) error { + resp, err := c.create(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. 
+func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { + resp, err := c.create(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) +} + +// Exists returns true if a container with given name exists +// on the storage account, otherwise returns false. 
+func (c *Container) Exists() (bool, error) { + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}}) + headers := c.bsc.client.getStandardHeaders() + + resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetContainerPermissionOptions includes options for a set container permissions operation +type SetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetPermissions sets up container permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL +func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return errors.New("Unable to set 
permissions") + } + + return nil +} + +// GetContainerPermissionOptions includes options for a get container permissions operation +type GetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will only be passed to Azure if populated +func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildAccessPolicy(ap, &resp.headers), nil +} + +func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { + // containerAccess. 
Blob, Container, empty + containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + permissions := ContainerPermissions{ + AccessType: ContainerAccessType(containerAccess), + AccessPolicies: []ContainerAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + capd := ContainerAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") + capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + permissions.AccessPolicies = append(permissions.AccessPolicies, capd) + } + return &permissions +} + +// DeleteContainerOptions includes options for a delete container operation +type DeleteContainerOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) Delete(options *DeleteContainerOptions) error { + resp, err := c.delete(options) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { + resp, err := c.delete(options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs +func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}, + ) + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) + + var out BlobListResponse + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + for i := range out.Blobs { + out.Blobs[i].Container = c + } + return out, err +} + +func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, capd := range policies { + permission := capd.generateContainerPermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) { + // generate the permissions string (rwd). + // still want the end user API to have bool flags. 
+ permissions = "" + + if capd.CanRead { + permissions += "r" + } + + if capd.CanWrite { + permissions += "w" + } + + if capd.CanDelete { + permissions += "d" + } + + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container_test.go new file mode 100644 index 000000000000..e7317473c448 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container_test.go @@ -0,0 +1,554 @@ +package storage + +import ( + "sort" + "strconv" + "time" + + chk "gopkg.in/check.v1" +) + +type ContainerSuite struct{} + +var _ = chk.Suite(&ContainerSuite{}) + +func (s *ContainerSuite) Test_containerBuildPath(c *chk.C) { + cli := getBlobClient(c) + cnt := cli.GetContainerReference("lol") + c.Assert(cnt.buildPath(), chk.Equals, "/lol") +} + +func (s *ContainerSuite) TestListContainersPagination(c *chk.C) { + cli := getBlobClient(c) + cli.deleteTestContainers(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + const n = 5 + const pageSize = 2 + + cntNames := []string{} + for i := 0; i < n; i++ { + cntNames = append(cntNames, containerName(c, strconv.Itoa(i))) + } + sort.Strings(cntNames) + + // Create test containers + created := []*Container{} + for i := 0; i < n; i++ { + cnt := cli.GetContainerReference(cntNames[i]) + c.Assert(cnt.Create(nil), chk.IsNil) + created = append(created, cnt) + defer cnt.Delete(nil) + } + + // Paginate results + seen := []Container{} + marker := "" + for { + resp, err := cli.ListContainers(ListContainersParameters{ + MaxResults: pageSize, + Marker: marker}) + + c.Assert(err, chk.IsNil) + + if len(resp.Containers) > pageSize { + c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(resp.Containers)) + } + + for _, c := range resp.Containers { + seen = append(seen, c) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Containers) == 0 { + break + } + } + + for i := range created { + c.Assert(seen[i].Name, chk.DeepEquals, created[i].Name) + } +} + +func (s *ContainerSuite) TestContainerExists(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // Container does not exist + cnt1 := cli.GetContainerReference(containerName(c, "1")) + ok, err := cnt1.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // COntainer exists + cnt2 := cli.GetContainerReference(containerName(c, "2")) + c.Assert(cnt2.Create(nil), chk.IsNil) + defer cnt2.Delete(nil) + ok, err = cnt2.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *ContainerSuite) TestCreateContainerDeleteContainer(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + c.Assert(cnt.Delete(nil), chk.IsNil) +} + +func (s *ContainerSuite) TestCreateContainerIfNotExists(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // Create non exisiting container + cnt := cli.GetContainerReference(containerName(c)) + ok, err := cnt.CreateIfNotExists(nil) + defer cnt.Delete(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + +} + +func (s *ContainerSuite) TestCreateContainerIfExists(c *chk.C) { + cli := getBlobClient(c) + cnt := cli.GetContainerReference(containerName(c)) + cnt.Create(nil) + defer cnt.Delete(nil) + rec := cli.client.appendRecorder(c) + cnt.bsc = &cli + defer rec.Stop() + + // Try to create already exisiting container + ok, err := cnt.CreateIfNotExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *ContainerSuite) 
TestDeleteContainerIfExists(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // Nonexisting container + cnt1 := cli.GetContainerReference(containerName(c, "1")) + ok, err := cnt1.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + ok, err = cnt1.DeleteIfExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Existing container + cnt2 := cli.GetContainerReference(containerName(c, "2")) + c.Assert(cnt2.Create(nil), chk.IsNil) + ok, err = cnt2.DeleteIfExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *ContainerSuite) TestListBlobsPagination(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + cnt := cli.GetContainerReference(containerName(c)) + + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + blobs := []string{} + const n = 5 + const pageSize = 2 + for i := 0; i < n; i++ { + name := blobName(c, strconv.Itoa(i)) + b := cnt.GetBlobReference(name) + c.Assert(b.putSingleBlockBlob([]byte("Hello, world!")), chk.IsNil) + blobs = append(blobs, name) + } + sort.Strings(blobs) + + // Paginate + seen := []string{} + marker := "" + for { + resp, err := cnt.ListBlobs(ListBlobsParameters{ + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + for _, b := range resp.Blobs { + seen = append(seen, b.Name) + c.Assert(b.Container, chk.Equals, cnt) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Blobs) == 0 { + break + } + } + + // Compare + c.Assert(seen, chk.DeepEquals, blobs) +} + +// listBlobsAsFiles is a helper function to list blobs as "folders" and "files". 
+func listBlobsAsFiles(cli BlobStorageClient, cnt *Container, parentDir string) (folders []string, files []string, err error) { + var blobParams ListBlobsParameters + var blobListResponse BlobListResponse + + // Top level "folders" + blobParams = ListBlobsParameters{ + Delimiter: "/", + Prefix: parentDir, + } + + blobListResponse, err = cnt.ListBlobs(blobParams) + if err != nil { + return nil, nil, err + } + + // These are treated as "folders" under the parentDir. + folders = blobListResponse.BlobPrefixes + + // "Files"" are blobs which are under the parentDir. + files = make([]string, len(blobListResponse.Blobs)) + for i := range blobListResponse.Blobs { + files[i] = blobListResponse.Blobs[i].Name + } + + return folders, files, nil +} + +// TestListBlobsTraversal tests that we can correctly traverse +// blobs in blob storage as if it were a file system by using +// a combination of Prefix, Delimiter, and BlobPrefixes. +// +// Blob storage is flat, but we can *simulate* the file +// system with folders and files using conventions in naming. +// With the blob namedd "/usr/bin/ls", when we use delimiter '/', +// the "ls" would be a "file"; with "/", /usr" and "/usr/bin" being +// the "folders" +// +// NOTE: The use of delimiter (eg forward slash) is extremely fiddly +// and difficult to get right so some discipline in naming and rules +// when using the API is required to get everything to work as expected. +// +// Assuming our delimiter is a forward slash, the rules are: +// +// - Do use a leading forward slash in blob names to make things +// consistent and simpler (see further). +// Note that doing so will show "" as the only top-level +// folder in the container in Azure portal, which may look strange. +// +// - The "folder names" are returned *with trailing forward slash* as per MSDN. +// +// - The "folder names" will be "absolute paths", e.g. listing things under "/usr/" +// will return folder names "/usr/bin/". 
+// +// - The "file names" are returned as full blob names, e.g. when listing +// things under "/usr/bin/", the file names will be "/usr/bin/ls" and +// "/usr/bin/cat". +// +// - Everything is returned with case-sensitive order as expected in real file system +// as per MSDN. +// +// - To list things under a "folder" always use trailing forward slash. +// +// Example: to list top level folders we use root folder named "" with +// trailing forward slash, so we use "/". +// +// Example: to list folders under "/usr", we again append forward slash and +// so we use "/usr/". +// +// Because we use leading forward slash we don't need to have different +// treatment of "get top-level folders" and "get non-top-level folders" +// scenarios. +func (s *ContainerSuite) TestListBlobsTraversal(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + // Note use of leading forward slash as per naming rules. + blobsToCreate := []string{ + "/usr/bin/ls", + "/usr/bin/cat", + "/usr/lib64/libc.so", + "/etc/hosts", + "/etc/init.d/iptables", + } + + // Create the above blobs + for _, blobName := range blobsToCreate { + b := cnt.GetBlobReference(blobName) + err := b.CreateBlockBlob(nil) + c.Assert(err, chk.IsNil) + } + + var folders []string + var files []string + var err error + + // Top level folders and files. + folders, files, err = listBlobsAsFiles(cli, cnt, "/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/", "/usr/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /etc/. Note use of trailing forward slash here as per rules. 
+ folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/init.d/"}) + c.Assert(files, chk.DeepEquals, []string{"/etc/hosts"}) + + // Things under /etc/init.d/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/init.d/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/etc/init.d/iptables"}) + + // Things under /usr/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/usr/bin/", "/usr/lib64/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /usr/bin/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/bin/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/usr/bin/cat", "/usr/bin/ls"}) +} + +func (s *ContainerSuite) TestListBlobsWithMetadata(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + expectMeta := make(map[string]BlobMetadata) + + // Put 4 blobs with metadata + for i := 0; i < 4; i++ { + name := blobName(c, strconv.Itoa(i)) + b := cnt.GetBlobReference(name) + c.Assert(b.putSingleBlockBlob([]byte("Hello, world!")), chk.IsNil) + b.Metadata = BlobMetadata{ + "Lol": name, + "Rofl_BAZ": "Waz Qux", + } + c.Assert(b.SetMetadata(nil), chk.IsNil) + expectMeta[name] = BlobMetadata{ + "lol": name, + "rofl_baz": "Waz Qux", + } + _, err := b.CreateSnapshot(nil) + c.Assert(err, chk.IsNil) + } + + // Put one more blob with no metadata + b := cnt.GetBlobReference(blobName(c, "nometa")) + c.Assert(b.putSingleBlockBlob([]byte("Hello, world!")), chk.IsNil) + expectMeta[b.Name] = nil + + // Get ListBlobs with include: metadata and snapshots + resp, err := 
cnt.ListBlobs(ListBlobsParameters{ + Include: &IncludeBlobDataset{ + Metadata: true, + Snapshots: true, + }, + }) + c.Assert(err, chk.IsNil) + + originalBlobs := make(map[string]Blob) + snapshotBlobs := make(map[string]Blob) + for _, v := range resp.Blobs { + if v.Snapshot == (time.Time{}) { + originalBlobs[v.Name] = v + } else { + snapshotBlobs[v.Name] = v + + } + } + c.Assert(originalBlobs, chk.HasLen, 5) + c.Assert(snapshotBlobs, chk.HasLen, 4) + + // Verify the metadata is as expected + for name := range expectMeta { + c.Check(originalBlobs[name].Metadata, chk.DeepEquals, expectMeta[name]) + c.Check(snapshotBlobs[name].Metadata, chk.DeepEquals, expectMeta[name]) + } +} + +func appendContainerPermission(perms ContainerPermissions, accessType ContainerAccessType, + ID string, start time.Time, expiry time.Time, + canRead bool, canWrite bool, canDelete bool) ContainerPermissions { + + perms.AccessType = accessType + + if ID != "" { + capd := ContainerAccessPolicy{ + ID: ID, + StartTime: start, + ExpiryTime: expiry, + CanRead: canRead, + CanWrite: canWrite, + CanDelete: canDelete, + } + perms.AccessPolicies = append(perms.AccessPolicies, capd) + } + return perms +} + +func (s *ContainerSuite) TestSetContainerPermissionsWithTimeoutSuccessfully(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + perms := ContainerPermissions{} + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "GolangRocksOnAzure", fixedTime, fixedTime.Add(10*time.Hour), true, true, true) + + options := SetContainerPermissionOptions{ + Timeout: 30, + } + err := cnt.SetPermissions(perms, &options) + c.Assert(err, chk.IsNil) +} + +func (s *ContainerSuite) TestSetContainerPermissionsSuccessfully(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := 
cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + perms := ContainerPermissions{} + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "GolangRocksOnAzure", fixedTime, fixedTime.Add(10*time.Hour), true, true, true) + + err := cnt.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) +} + +func (s *ContainerSuite) TestSetThenGetContainerPermissionsSuccessfully(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.delete(nil) + + perms := ContainerPermissions{} + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "AutoRestIsSuperCool", fixedTime, fixedTime.Add(10*time.Hour), true, true, true) + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "GolangRocksOnAzure", fixedTime.Add(20*time.Hour), fixedTime.Add(30*time.Hour), true, false, false) + c.Assert(perms.AccessPolicies, chk.HasLen, 2) + + err := cnt.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) + + newPerms, err := cnt.GetPermissions(nil) + c.Assert(err, chk.IsNil) + + // check container permissions itself. + c.Assert(newPerms.AccessType, chk.Equals, perms.AccessType) + + // fixedTime check policy set. + c.Assert(newPerms.AccessPolicies, chk.HasLen, 2) + + for i := range perms.AccessPolicies { + c.Assert(newPerms.AccessPolicies[i].ID, chk.Equals, perms.AccessPolicies[i].ID) + + // test timestamps down the second + // rounding start/expiry time original perms since the returned perms would have been rounded. + // so need rounded vs rounded. 
+ c.Assert(newPerms.AccessPolicies[i].StartTime.UTC().Round(time.Second).Format(time.RFC1123), + chk.Equals, perms.AccessPolicies[i].StartTime.UTC().Round(time.Second).Format(time.RFC1123)) + + c.Assert(newPerms.AccessPolicies[i].ExpiryTime.UTC().Round(time.Second).Format(time.RFC1123), + chk.Equals, perms.AccessPolicies[i].ExpiryTime.UTC().Round(time.Second).Format(time.RFC1123)) + + c.Assert(newPerms.AccessPolicies[i].CanRead, chk.Equals, perms.AccessPolicies[i].CanRead) + c.Assert(newPerms.AccessPolicies[i].CanWrite, chk.Equals, perms.AccessPolicies[i].CanWrite) + c.Assert(newPerms.AccessPolicies[i].CanDelete, chk.Equals, perms.AccessPolicies[i].CanDelete) + } +} + +func (s *ContainerSuite) TestSetContainerPermissionsOnlySuccessfully(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + perms := ContainerPermissions{} + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "GolangRocksOnAzure", fixedTime, fixedTime.Add(10*time.Hour), true, true, true) + + err := cnt.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) +} + +func (s *ContainerSuite) TestSetThenGetContainerPermissionsOnlySuccessfully(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + perms := ContainerPermissions{} + perms = appendContainerPermission(perms, ContainerAccessTypeBlob, "", fixedTime, fixedTime.Add(10*time.Hour), true, true, true) + + err := cnt.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) + + newPerms, err := cnt.GetPermissions(nil) + c.Assert(err, chk.IsNil) + + // check container permissions itself. 
+ c.Assert(newPerms.AccessType, chk.Equals, perms.AccessType) + + // fixedTime check there are NO policies set + c.Assert(newPerms.AccessPolicies, chk.HasLen, 0) +} + +func (cli *BlobStorageClient) deleteTestContainers(c *chk.C) error { + for { + resp, err := cli.ListContainers(ListContainersParameters{}) + if err != nil { + return err + } + if len(resp.Containers) == 0 { + break + } + for _, c := range resp.Containers { + err = c.Delete(nil) + if err != nil { + return err + } + } + } + return nil +} + +func containerName(c *chk.C, extras ...string) string { + return nameGenerator(32, "cnt-", alphanum, c, extras) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go new file mode 100644 index 000000000000..377a3c622e9a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go @@ -0,0 +1,223 @@ +package storage + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// CopyOptions includes the options for a copy blob operation +type CopyOptions struct { + Timeout uint + Source CopyOptionsConditions + Destiny CopyOptionsConditions + RequestID string +} + +// IncrementalCopyOptions includes the options for an incremental copy blob operation +type IncrementalCopyOptions struct { + Timeout uint + Destination IncrementalCopyOptionsConditions + RequestID string +} + +// CopyOptionsConditions includes some conditional options in a copy blob operation +type CopyOptionsConditions struct { + LeaseID string + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation +type IncrementalCopyOptionsConditions struct { + IfModifiedSince *time.Time + 
IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// Copy starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { + copyID, err := b.StartCopy(sourceBlob, options) + if err != nil { + return err + } + + return b.WaitForCopy(copyID) +} + +// StartCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + // source + headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) + headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) + //destiny + headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) + headers = 
addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortCopyOptions includes the options for an abort blob operation +type AbortCopyOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob +func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error { + params := url.Values{ + "comp": {"copy"}, + "copyid": {copyID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-action"] = "abort" + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// WaitForCopy loops until a BlobCopy operation is completed (or fails with error) +func (b *Blob) WaitForCopy(copyID string) error { + for { + err := b.GetProperties(nil) + if err != nil { + return err + } + + if b.Properties.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch b.Properties.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus) + } + } +} + +// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob +// sourceBlob parameter must be a valid snapshot URL of the original blob. +// THe original blob mut be public, or use a Shared Access Signature. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob . 
+func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { + params := url.Values{"comp": {"incrementalcopy"}} + + // need formatting to 7 decimal places so it's friendly to Windows and *nix + snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") + u, err := url.Parse(sourceBlobURL) + if err != nil { + return "", err + } + query := u.Query() + query.Add("snapshot", snapshotTimeFormatted) + encodedQuery := query.Encode() + encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) + u.RawQuery = encodedQuery + snapshotURL := u.String() + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = snapshotURL + + if options != nil { + addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) + } + + // get URI of destination blob + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob_test.go new file mode 100644 index 
000000000000..60a50c8b7b4d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob_test.go @@ -0,0 +1,171 @@ +package storage + +import ( + "io/ioutil" + "net/http" + "testing" + + chk "gopkg.in/check.v1" +) + +type CopyBlobSuite struct{} + +var _ = chk.Suite(&CopyBlobSuite{}) + +func (s *CopyBlobSuite) TestBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + srcBlob := cnt.GetBlobReference(blobName(c, "src")) + dstBlob := cnt.GetBlobReference(blobName(c, "dst")) + body := content(1024) + + c.Assert(srcBlob.putSingleBlockBlob(body), chk.IsNil) + defer srcBlob.Delete(nil) + + c.Assert(dstBlob.Copy(srcBlob.GetURL(), nil), chk.IsNil) + defer dstBlob.Delete(nil) + + resp, err := dstBlob.Get(nil) + c.Assert(err, chk.IsNil) + + b, err := ioutil.ReadAll(resp) + defer resp.Close() + c.Assert(err, chk.IsNil) + c.Assert(b, chk.DeepEquals, body) +} + +func (s *CopyBlobSuite) TestStartBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + srcBlob := cnt.GetBlobReference(blobName(c, "src")) + dstBlob := cnt.GetBlobReference(blobName(c, "dst")) + body := content(1024) + + c.Assert(srcBlob.putSingleBlockBlob(body), chk.IsNil) + defer srcBlob.Delete(nil) + + // given we dont know when it will start, can we even test destination creation? + // will just test that an error wasn't thrown for now. 
+ copyID, err := dstBlob.StartCopy(srcBlob.GetURL(), nil) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) +} + +// Tests abort of blobcopy. Given the blobcopy is usually over before we can actually trigger an abort +// it is agreed that we perform a copy then try and perform an abort. It should result in a HTTP status of 409. +// So basically we're testing negative scenario (as good as we can do for now) +func (s *CopyBlobSuite) TestAbortBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + srcBlob := cnt.GetBlobReference(blobName(c, "src")) + dstBlob := cnt.GetBlobReference(blobName(c, "dst")) + body := content(1024) + + c.Assert(srcBlob.putSingleBlockBlob(body), chk.IsNil) + defer srcBlob.Delete(nil) + + // given we dont know when it will start, can we even test destination creation? + // will just test that an error wasn't thrown for now. + copyID, err := dstBlob.StartCopy(srcBlob.GetURL(), nil) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) + + err = dstBlob.WaitForCopy(copyID) + c.Assert(err, chk.IsNil) + + // abort abort abort, but we *know* its already completed. 
+ err = dstBlob.AbortCopy(copyID, nil) + + // abort should fail (over already) + c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusConflict) +} + +func (s *CopyBlobSuite) TestIncrementalCopyBlobNoTimeout(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + options := CreateContainerOptions{ + Access: ContainerAccessTypeBlob, + } + c.Assert(cnt.Create(&options), chk.IsNil) + defer cnt.Delete(nil) + + b := cnt.GetBlobReference(blobName(c, "src")) + size := int64(10 * 1024 * 1024) + b.Properties.ContentLength = size + c.Assert(b.PutPageBlob(nil), chk.IsNil) + + snapshotTime, err := b.CreateSnapshot(nil) + c.Assert(err, chk.IsNil) + c.Assert(snapshotTime, chk.NotNil) + + u := b.GetURL() + destBlob := cnt.GetBlobReference(blobName(c, "dst")) + copyID, err := destBlob.IncrementalCopyBlob(u, *snapshotTime, nil) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) +} + +func (s *CopyBlobSuite) TestIncrementalCopyBlobWithTimeout(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + options := CreateContainerOptions{ + Access: ContainerAccessTypeBlob, + } + c.Assert(cnt.Create(&options), chk.IsNil) + defer cnt.Delete(nil) + + b := cnt.GetBlobReference(blobName(c, "src")) + size := int64(10 * 1024 * 1024) + b.Properties.ContentLength = size + c.Assert(b.PutPageBlob(nil), chk.IsNil) + + snapshotTime, err := b.CreateSnapshot(nil) + c.Assert(err, chk.IsNil) + c.Assert(snapshotTime, chk.NotNil) + + u := b.GetURL() + destBlob := cnt.GetBlobReference(blobName(c, "dst")) + copyID, err := destBlob.IncrementalCopyBlob(u, *snapshotTime, &IncrementalCopyOptions{Timeout: 30}) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) +} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go new file mode 100644 index 000000000000..29610329ec31 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -0,0 +1,222 @@ +package storage + +import ( + "encoding/xml" + "net/http" + "net/url" +) + +// Directory represents a directory on a share. +type Directory struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties DirectoryProperties + share *Share +} + +// DirectoryProperties contains various properties of a directory. +type DirectoryProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` +} + +// ListDirsAndFilesParameters defines the set of customizable parameters to +// make a List Files and Directories call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +type ListDirsAndFilesParameters struct { + Prefix string + Marker string + MaxResults uint + Timeout uint +} + +// DirsAndFilesListResponse contains the response fields from +// a List Files and Directories call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +type DirsAndFilesListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Marker string `xml:"Marker"` + MaxResults int64 `xml:"MaxResults"` + Directories []Directory `xml:"Entries>Directory"` + Files []File `xml:"Entries>File"` + NextMarker string `xml:"NextMarker"` +} + +// builds the complete directory path for this directory object. +func (d *Directory) buildPath() string { + path := "" + current := d + for current.Name != "" { + path = "/" + current.Name + path + current = current.parent + } + return d.share.buildPath() + path +} + +// Create this directory in the associated share. 
+// If a directory with the same name already exists, the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) Create(options *FileRequestOptions) error { + // if this is the root directory exit early + if d.parent == nil { + return nil + } + + params := prepareOptions(options) + headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated}) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this directory under the associated share if the +// directory does not exists. Returns true if the directory is newly created or +// false if the directory already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory +func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + // if this is the root directory exit early + if d.parent == nil { + return false, nil + } + + params := prepareOptions(options) + resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + d.updateEtagAndLastModified(resp.headers) + return true, nil + } + + return false, d.FetchAttributes(nil) + } + } + + return false, err +} + +// Delete removes this directory. It must be empty in order to be deleted. +// If the directory does not exist the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) Delete(options *FileRequestOptions) error { + return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options) +} + +// DeleteIfExists removes this directory if it exists. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this directory exists. +func (d *Directory) Exists() (bool, error) { + exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) + if exists { + d.updateEtagAndLastModified(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata for this directory. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties +func (d *Directory) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + d.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetDirectoryReference returns a child Directory object for this directory. +func (d *Directory) GetDirectoryReference(name string) *Directory { + return &Directory{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// GetFileReference returns a child File object for this directory. +func (d *Directory) GetFileReference(name string) *File { + return &File{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// ListDirsAndFiles returns a list of files and directories under this directory. +// It also contains a pagination token and other response details. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { + q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) + + resp, err := d.fsc.listContent(d.buildPath(), q, nil) + if err != nil { + return nil, err + } + + defer resp.body.Close() + var out DirsAndFilesListResponse + err = xmlUnmarshal(resp.body, &out) + return &out, err +} + +// SetMetadata replaces the metadata for this directory. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetDirectoryMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata +func (d *Directory) SetMetadata(options *FileRequestOptions) error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (d *Directory) updateEtagAndLastModified(headers http.Header) { + d.Properties.Etag = headers.Get("Etag") + d.Properties.LastModified = headers.Get("Last-Modified") +} + +// URL gets the canonical URL to this directory. +// This method does not create a publicly accessible URL if the directory +// is private and this method does not check if the directory exists. 
+func (d *Directory) URL() string { + return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory_test.go new file mode 100644 index 000000000000..ffe5f25c6e79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory_test.go @@ -0,0 +1,170 @@ +package storage + +import chk "gopkg.in/check.v1" + +type StorageDirSuite struct{} + +var _ = chk.Suite(&StorageDirSuite{}) + +func (s *StorageDirSuite) TestListZeroDirsAndFiles(c *chk.C) { + // create share + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + + // list contents, should be empty + root := share.GetRootDirectoryReference() + resp, err := root.ListDirsAndFiles(ListDirsAndFilesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(resp.Directories, chk.IsNil) + c.Assert(resp.Files, chk.IsNil) +} + +func (s *StorageDirSuite) TestListDirsAndFiles(c *chk.C) { + // create share + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + + // create a directory and a file + root := share.GetRootDirectoryReference() + dir := root.GetDirectoryReference("SomeDirectory") + file := root.GetFileReference("lol.file") + c.Assert(dir.Create(nil), chk.IsNil) + c.Assert(file.Create(512, nil), chk.IsNil) + + // list contents + resp, err := root.ListDirsAndFiles(ListDirsAndFilesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(resp.Directories), chk.Equals, 1) + c.Assert(len(resp.Files), chk.Equals, 1) + c.Assert(resp.Directories[0].Name, chk.Equals, dir.Name) + c.Assert(resp.Files[0].Name, chk.Equals, file.Name) + + // delete file + del, err := 
file.DeleteIfExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(del, chk.Equals, true) + + ok, err := file.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageDirSuite) TestCreateDirectory(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + + root := share.GetRootDirectoryReference() + dir := root.GetDirectoryReference("dir") + err := dir.Create(nil) + c.Assert(err, chk.IsNil) + + // check properties + c.Assert(dir.Properties.Etag, chk.Not(chk.Equals), "") + c.Assert(dir.Properties.LastModified, chk.Not(chk.Equals), "") + + // delete directory and verify + c.Assert(dir.Delete(nil), chk.IsNil) + exists, err := dir.Exists() + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) +} + +func (s *StorageDirSuite) TestCreateDirectoryIfNotExists(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // create share + share := cli.GetShareReference(shareName(c)) + share.Create(nil) + defer share.Delete(nil) + + // create non exisiting directory + root := share.GetRootDirectoryReference() + dir := root.GetDirectoryReference("dir") + exists, err := dir.CreateIfNotExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, true) + + c.Assert(dir.Properties.Etag, chk.Not(chk.Equals), "") + c.Assert(dir.Properties.LastModified, chk.Not(chk.Equals), "") + + c.Assert(dir.Delete(nil), chk.IsNil) + exists, err = dir.Exists() + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) +} + +func (s *StorageDirSuite) TestCreateDirectoryIfExists(c *chk.C) { + // create share + cli := getFileClient(c) + share := cli.GetShareReference(shareName(c)) + share.Create(nil) + defer share.Delete(nil) + + // create directory + root := share.GetRootDirectoryReference() + dir := root.GetDirectoryReference("dir") + dir.Create(nil) + + 
rec := cli.client.appendRecorder(c) + dir.fsc = &cli + defer rec.Stop() + + // try to create directory + exists, err := dir.CreateIfNotExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) + + // check properties + c.Assert(dir.Properties.Etag, chk.Not(chk.Equals), "") + c.Assert(dir.Properties.LastModified, chk.Not(chk.Equals), "") + + // delete directory + c.Assert(dir.Delete(nil), chk.IsNil) +} + +func (s *StorageDirSuite) TestDirectoryMetadata(c *chk.C) { + // create share + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() + + dir := root.GetDirectoryReference("testdir") + c.Assert(dir.Create(nil), chk.IsNil) + + // get metadata, shouldn't be any + c.Assert(dir.Metadata, chk.IsNil) + + // set some custom metadata + md := map[string]string{ + "something": "somethingvalue", + "another": "anothervalue", + } + dir.Metadata = md + c.Assert(dir.SetMetadata(nil), chk.IsNil) + + // retrieve and verify + c.Assert(dir.FetchAttributes(nil), chk.IsNil) + c.Assert(dir.Metadata, chk.DeepEquals, md) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go new file mode 100644 index 000000000000..13e94750731f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -0,0 +1,439 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/satori/uuid" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + etagErrorTemplate = "Etag didn't match: %v" +) + +var ( + errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") + errNilPreviousResult = 
errors.New("The previous results page is nil") + errNilNextLink = errors.New("There are no more pages in this query results") +) + +// Entity represents an entity inside an Azure table. +type Entity struct { + Table *Table + PartitionKey string + RowKey string + TimeStamp time.Time + OdataMetadata string + OdataType string + OdataID string + OdataEtag string + OdataEditLink string + Properties map[string]interface{} +} + +// GetEntityReference returns an Entity object with the specified +// partition key and row key. +func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { + return &Entity{ + PartitionKey: partitionKey, + RowKey: rowKey, + Table: t, + } +} + +// EntityOptions includes options for entity operations. +type EntityOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetEntityOptions includes options for a get entity operation +type GetEntityOptions struct { + Select []string + RequestID string `header:"x-ms-client-request-id"` +} + +// Get gets the referenced entity. Which properties to get can be +// specified using the select option. 
+// See: +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { + if ml == EmptyPayload { + return errEmptyPayload + } + // RowKey and PartitionKey could be lost if not included in the query + // As those are the entity identifiers, it is best if they are not lost + rk := e.RowKey + pk := e.PartitionKey + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := e.Table.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + if options != nil { + if len(options.Select) > 0 { + query.Add("$select", strings.Join(options.Select, ",")) + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, e) + if err != nil { + return err + } + e.PartitionKey = pk + e.RowKey = rk + + return nil +} + +// Insert inserts the referenced entity in its table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. 
+// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity +func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, ml) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer resp.body.Close() + + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + + if ml != EmptyPayload { + if err = checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + if err = e.UnmarshalJSON(data); err != nil { + return err + } + } else { + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } + + return nil +} + +// Update updates the contents of an entity. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or if the ETag is different +// than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 +func (e *Entity) Update(force bool, options *EntityOptions) error { + return e.updateMerge(force, http.MethodPut, options) +} + +// Merge merges the contents of entity specified with PartitionKey and RowKey +// with the content specified in Properties. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. 
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity +func (e *Entity) Merge(force bool, options *EntityOptions) error { + return e.updateMerge(force, "MERGE", options) +} + +// Delete deletes the entity. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 +func (e *Entity) Delete(force bool, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateTimestamp(resp.headers) +} + +// InsertOrReplace inserts an entity or replaces the existing one. +// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity +func (e *Entity) InsertOrReplace(options *EntityOptions) error { + return e.insertOr(http.MethodPut, options) +} + +// InsertOrMerge inserts an entity or merges the existing one. 
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity +func (e *Entity) InsertOrMerge(options *EntityOptions) error { + return e.insertOr("MERGE", options) +} + +func (e *Entity) buildPath() string { + return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) +} + +// MarshalJSON is a custom marshaller for entity +func (e *Entity) MarshalJSON() ([]byte, error) { + completeMap := map[string]interface{}{} + completeMap[partitionKeyNode] = e.PartitionKey + completeMap[rowKeyNode] = e.RowKey + for k, v := range e.Properties { + typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") + switch t := v.(type) { + case []byte: + completeMap[typeKey] = OdataBinary + completeMap[k] = string(t) + case time.Time: + completeMap[typeKey] = OdataDateTime + completeMap[k] = t.Format(time.RFC3339Nano) + case uuid.UUID: + completeMap[typeKey] = OdataGUID + completeMap[k] = t.String() + case int64: + completeMap[typeKey] = OdataInt64 + completeMap[k] = fmt.Sprintf("%v", v) + default: + completeMap[k] = v + } + if strings.HasSuffix(k, OdataTypeSuffix) { + if !(completeMap[k] == OdataBinary || + completeMap[k] == OdataDateTime || + completeMap[k] == OdataGUID || + completeMap[k] == OdataInt64) { + return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) + } + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + if _, ok := completeMap[valueKey]; !ok { + return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) + } + } + } + return json.Marshal(completeMap) +} + +// UnmarshalJSON is a custom unmarshaller for entities +func (e *Entity) UnmarshalJSON(data []byte) error { + errorTemplate := "Deserializing error: %v" + + props := map[string]interface{}{} + err := json.Unmarshal(data, &props) + if err != nil { + return err + } + + // deselialize metadata + e.OdataMetadata = stringFromMap(props, "odata.metadata") + e.OdataType = stringFromMap(props, 
"odata.type") + e.OdataID = stringFromMap(props, "odata.id") + e.OdataEtag = stringFromMap(props, "odata.etag") + e.OdataEditLink = stringFromMap(props, "odata.editLink") + e.PartitionKey = stringFromMap(props, partitionKeyNode) + e.RowKey = stringFromMap(props, rowKeyNode) + + // deserialize timestamp + timeStamp, ok := props["Timestamp"] + if ok { + str, ok := timeStamp.(string) + if !ok { + return fmt.Errorf(errorTemplate, "Timestamp casting error") + } + t, err := time.Parse(time.RFC3339Nano, str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + e.TimeStamp = t + } + delete(props, "Timestamp") + delete(props, "Timestamp@odata.type") + + // deserialize entity (user defined fields) + for k, v := range props { + if strings.HasSuffix(k, OdataTypeSuffix) { + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + str, ok := props[valueKey].(string) + if !ok { + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) + } + switch v { + case OdataBinary: + props[valueKey] = []byte(str) + case OdataDateTime: + t, err := time.Parse("2006-01-02T15:04:05Z", str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = t + case OdataGUID: + props[valueKey] = uuid.FromStringOrNil(str) + case OdataInt64: + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = i + default: + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) + } + delete(props, k) + } + } + + e.Properties = props + return nil +} + +func getAndDelete(props map[string]interface{}, key string) interface{} { + if value, ok := props[key]; ok { + delete(props, key) + return value + } + return nil +} + +func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { + if force { + h[headerIfMatch] = "*" + } else { + h[headerIfMatch] = etag + } + return h +} + +// updates Etag and timestamp +func (e *Entity) updateEtagAndTimestamp(headers http.Header) error 
{ + e.OdataEtag = headers.Get(headerEtag) + return e.updateTimestamp(headers) +} + +func (e *Entity) updateTimestamp(headers http.Header) error { + str := headers.Get(headerDate) + t, err := time.Parse(time.RFC1123, str) + if err != nil { + return fmt.Errorf("Update timestamp error: %v", err) + } + e.TimeStamp = t + return nil +} + +func (e *Entity) insertOr(verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + if resp.statusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err 
!= nil { + return err + } + + return e.updateEtagAndTimestamp(resp.headers) +} + +func stringFromMap(props map[string]interface{}, key string) string { + value := getAndDelete(props, key) + if value != nil { + return value.(string) + } + return "" +} + +func (options *EntityOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + query = addTimeout(query, options.Timeout) + headers = headersFromStruct(*options) + } + return query, headers +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity_test.go new file mode 100644 index 000000000000..22f95bd75147 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity_test.go @@ -0,0 +1,550 @@ +package storage + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/satori/uuid" + chk "gopkg.in/check.v1" +) + +type StorageEntitySuite struct{} + +var _ = chk.Suite(&StorageEntitySuite{}) + +func (s *StorageEntitySuite) TestGet(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + err = entity.Insert(EmptyPayload, nil) + c.Assert(err, chk.IsNil) + + err = entity.Get(30, FullMetadata, &GetEntityOptions{ + Select: []string{"IsActive"}, + }) + c.Assert(err, chk.IsNil) + c.Assert(entity.Properties, chk.HasLen, 1) + + err = entity.Get(30, FullMetadata, &GetEntityOptions{ + 
Select: []string{ + "AmountDue", + "CustomerCode", + "CustomerSince", + "IsActive", + "NumberOfOrders", + }}) + c.Assert(err, chk.IsNil) + c.Assert(entity.Properties, chk.HasLen, 5) + + err = entity.Get(30, FullMetadata, nil) + c.Assert(err, chk.IsNil) + c.Assert(entity.Properties, chk.HasLen, 5) +} + +const ( + validEtag = "W/\"datetime''2017-04-01T01%3A07%3A23.8881885Z''\"" +) + +func (s *StorageEntitySuite) TestInsert(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + err = entity.Insert(EmptyPayload, nil) + c.Assert(err, chk.IsNil) + // Did not update + c.Assert(entity.TimeStamp, chk.Equals, time.Time{}) + c.Assert(entity.OdataMetadata, chk.Equals, "") + c.Assert(entity.OdataType, chk.Equals, "") + c.Assert(entity.OdataID, chk.Equals, "") + c.Assert(entity.OdataEtag, chk.Equals, "") + c.Assert(entity.OdataEditLink, chk.Equals, "") + + // Update + entity.PartitionKey = "mypartitionkey2" + entity.RowKey = "myrowkey2" + err = entity.Insert(FullMetadata, nil) + c.Assert(err, chk.IsNil) + // Check everything was updated... 
+ c.Assert(entity.TimeStamp, chk.NotNil) + c.Assert(entity.OdataMetadata, chk.Not(chk.Equals), "") + c.Assert(entity.OdataType, chk.Not(chk.Equals), "") + c.Assert(entity.OdataID, chk.Not(chk.Equals), "") + c.Assert(entity.OdataEtag, chk.Not(chk.Equals), "") + c.Assert(entity.OdataEditLink, chk.Not(chk.Equals), "") +} + +func (s *StorageEntitySuite) TestUpdate(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + entity.Properties = map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + // Force update + err = entity.Insert(FullMetadata, nil) + c.Assert(err, chk.IsNil) + + etag := entity.OdataEtag + timestamp := entity.TimeStamp + + props := map[string]interface{}{ + "Name": "Anakin", + "FamilyName": "Skywalker", + "HasEpicTheme": true, + } + entity.Properties = props + // Update providing etag + err = entity.Update(false, nil) + c.Assert(err, chk.IsNil) + + c.Assert(entity.Properties, chk.DeepEquals, props) + c.Assert(entity.OdataEtag, chk.Not(chk.Equals), etag) + c.Assert(entity.TimeStamp, chk.Not(chk.Equals), timestamp) + + // Try to update with old etag + entity.OdataEtag = validEtag + err = entity.Update(false, nil) + c.Assert(err, chk.NotNil) + c.Assert(strings.Contains(err.Error(), "Etag didn't match"), chk.Equals, true) + + // Force update + props = map[string]interface{}{ + "Name": "Leia", + "FamilyName": "Organa", + "HasAwesomeDress": true, + } + entity.Properties = props + err = entity.Update(true, nil) + c.Assert(err, chk.IsNil) + c.Assert(entity.Properties, chk.DeepEquals, props) +} + 
+func (s *StorageEntitySuite) TestMerge(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + entity.Properties = map[string]interface{}{ + "Country": "Mexico", + "MalePoet": "Nezahualcoyotl", + } + c.Assert(entity.Insert(FullMetadata, nil), chk.IsNil) + + etag := entity.OdataEtag + timestamp := entity.TimeStamp + + entity.Properties = map[string]interface{}{ + "FemalePoet": "Sor Juana Ines de la Cruz", + } + // Merge providing etag + err = entity.Merge(false, nil) + c.Assert(err, chk.IsNil) + c.Assert(entity.OdataEtag, chk.Not(chk.Equals), etag) + c.Assert(entity.TimeStamp, chk.Not(chk.Equals), timestamp) + + // Try to merge with incorrect etag + entity.OdataEtag = validEtag + err = entity.Merge(false, nil) + c.Assert(err, chk.NotNil) + c.Assert(strings.Contains(err.Error(), "Etag didn't match"), chk.Equals, true) + + // Force merge + entity.Properties = map[string]interface{}{ + "MalePainter": "Diego Rivera", + "FemalePainter": "Frida Kahlo", + } + err = entity.Merge(true, nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageEntitySuite) TestDelete(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + // Delete providing etag + entity1 := table.GetEntityReference("pkey1", "rowkey1") + c.Assert(entity1.Insert(FullMetadata, nil), chk.IsNil) + + err = entity1.Delete(false, nil) + c.Assert(err, chk.IsNil) + + // Try to delete with incorrect etag + entity2 := table.GetEntityReference("pkey2", "rowkey2") + c.Assert(entity2.Insert(EmptyPayload, nil), chk.IsNil) + entity2.OdataEtag = "GolangRocksOnAzure" + + err = 
entity2.Delete(false, nil) + c.Assert(err, chk.NotNil) + + // Force delete + err = entity2.Delete(true, nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageEntitySuite) TestInsertOrReplace(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + entity.Properties = map[string]interface{}{ + "Name": "Anakin", + "FamilyName": "Skywalker", + "HasEpicTheme": true, + } + + err = entity.InsertOrReplace(nil) + c.Assert(err, chk.IsNil) + + entity.Properties = map[string]interface{}{ + "Name": "Leia", + "FamilyName": "Organa", + "HasAwesomeDress": true, + } + err = entity.InsertOrReplace(nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageEntitySuite) TestInsertOrMerge(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + entity.Properties = map[string]interface{}{ + "Name": "Luke", + "FamilyName": "Skywalker", + } + + err = entity.InsertOrMerge(nil) + c.Assert(err, chk.IsNil) + + entity.Properties = map[string]interface{}{ + "Father": "Anakin", + "Mentor": "Yoda", + } + err = entity.InsertOrMerge(nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageEntitySuite) Test_InsertAndGetEntities(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "100") + entity.Properties = map[string]interface{}{ + "Name": 
"Luke", + "FamilyName": "Skywalker", + "HasCoolWeapon": true, + } + c.Assert(entity.Insert(EmptyPayload, nil), chk.IsNil) + + entity.RowKey = "200" + c.Assert(entity.Insert(FullMetadata, nil), chk.IsNil) + + entities, err := table.QueryEntities(30, FullMetadata, nil) + c.Assert(err, chk.IsNil) + + c.Assert(entities.Entities, chk.HasLen, 2) + c.Assert(entities.OdataMetadata+"/@Element", chk.Equals, entity.OdataMetadata) + + compareEntities(entities.Entities[1], entity, c) +} + +func (s *StorageEntitySuite) Test_InsertAndExecuteQuery(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "100") + entity.Properties = map[string]interface{}{ + "Name": "Luke", + "FamilyName": "Skywalker", + "HasCoolWeapon": true, + } + c.Assert(entity.Insert(EmptyPayload, nil), chk.IsNil) + + entity.RowKey = "200" + c.Assert(entity.Insert(EmptyPayload, nil), chk.IsNil) + + queryOptions := QueryOptions{ + Filter: "RowKey eq '200'", + } + + entities, err := table.QueryEntities(30, FullMetadata, &queryOptions) + c.Assert(err, chk.IsNil) + + c.Assert(entities.Entities, chk.HasLen, 1) + c.Assert(entities.Entities[0].RowKey, chk.Equals, entity.RowKey) +} + +func (s *StorageEntitySuite) Test_InsertAndDeleteEntities(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "100") + entity.Properties = map[string]interface{}{ + "FamilyName": "Skywalker", + "Name": "Luke", + "Number": 3, + } + c.Assert(entity.Insert(EmptyPayload, nil), chk.IsNil) + + entity.Properties["Number"] = 1 + entity.RowKey = "200" + 
c.Assert(entity.Insert(FullMetadata, nil), chk.IsNil) + + options := QueryOptions{ + Filter: "Number eq 1", + } + + result, err := table.QueryEntities(30, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(result.Entities, chk.HasLen, 1) + compareEntities(result.Entities[0], entity, c) + + err = result.Entities[0].Delete(true, nil) + c.Assert(err, chk.IsNil) + + result, err = table.QueryEntities(30, FullMetadata, nil) + c.Assert(err, chk.IsNil) + + // only 1 entry must be present + c.Assert(result.Entities, chk.HasLen, 1) +} + +func (s *StorageEntitySuite) TestExecuteQueryNextResults(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + var entityList []*Entity + + for i := 0; i < 5; i++ { + entity := table.GetEntityReference("pkey", fmt.Sprintf("r%d", i)) + err := entity.Insert(FullMetadata, nil) + c.Assert(err, chk.IsNil) + entityList = append(entityList, entity) + } + + // retrieve using top = 2. 
Should return 2 entries, 2 entries and finally + // 1 entry + options := QueryOptions{ + Top: 2, + } + results, err := table.QueryEntities(30, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 2) + c.Assert(results.NextLink, chk.NotNil) + compareEntities(results.Entities[0], entityList[0], c) + compareEntities(results.Entities[1], entityList[1], c) + + results, err = results.NextResults(nil) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 2) + c.Assert(results.NextLink, chk.NotNil) + compareEntities(results.Entities[0], entityList[2], c) + compareEntities(results.Entities[1], entityList[3], c) + + results, err = results.NextResults(nil) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 1) + c.Assert(results.NextLink, chk.IsNil) + compareEntities(results.Entities[0], entityList[4], c) +} + +func (s *StorageEntitySuite) Test_entityMarshalJSON(c *chk.C) { + expected := `{"Address":"Mountain View","Age":23,"AmountDue":200.23,"Binary":"abcd","Binary@odata.type":"Edm.Binary","CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833","CustomerCode@odata.type":"Edm.Guid","CustomerSince":"1992-12-20T21:55:00Z","CustomerSince@odata.type":"Edm.DateTime","IsActive":true,"NumberOfOrders":"255","NumberOfOrders@odata.type":"Edm.Int64","PartitionKey":"mypartitionkey","RowKey":"myrowkey"}` + + entity := Entity{ + PartitionKey: "mypartitionkey", + RowKey: "myrowkey", + Properties: map[string]interface{}{ + "Address": "Mountain View", + "Age": 23, + "AmountDue": 200.23, + "Binary": []byte("abcd"), + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + }, + } + got, err := json.Marshal(&entity) + c.Assert(err, chk.IsNil) + c.Assert(string(got), chk.Equals, expected) + + entity.Properties["Contoso@odata.type"] = "Edm.Trololololol" + got, err = 
json.Marshal(&entity) + c.Assert(got, chk.IsNil) + c.Assert(err, chk.ErrorMatches, ".*Odata.type annotation Contoso@odata.type value is not valid") + + entity.Properties["Contoso@odata.type"] = OdataGUID + got, err = json.Marshal(&entity) + c.Assert(got, chk.IsNil) + c.Assert(err, chk.ErrorMatches, ".*Odata.type annotation Contoso@odata.type defined without value defined") +} + +func (s *StorageEntitySuite) Test_entityUnmarshalJSON(c *chk.C) { + input := `{ + "odata.metadata":"https://azuregosdkstoragetests.table.core.windows.net/$metadata#SampleTable/@Element", + "odata.type":"azuregosdkstoragetests.SampleTable", + "odata.id":"https://azuregosdkstoragetests.table.core.windows.net/SampleTable(PartitionKey=''mypartitionkey'',RowKey=''myrowkey'')", + "odata.etag":"W/\"datetime''2017-01-27T01%3A01%3A44.151805Z''\"", + "odata.editLink":"SampleTable(PartitionKey=''mypartitionkey'',RowKey=''myrowkey'')", + "PartitionKey":"mypartitionkey", + "RowKey":"myrowkey", + "Timestamp":"2017-01-27T01:01:44.151805Z", + "Timestamp@odata.type":"Edm.DateTime", + "Address": "Mountain View", + "Age": 23, + "AmountDue":200.23, + "Binary@odata.type": "Edm.Binary", + "Binary": "abcd", + "CustomerCode@odata.type":"Edm.Guid", + "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833", + "CustomerSince@odata.type":"Edm.DateTime", + "CustomerSince":"1992-12-20T21:55:00Z", + "IsActive":true, + "NumberOfOrders@odata.type":"Edm.Int64", + "NumberOfOrders":"255"}` + + var entity Entity + data := []byte(input) + err := json.Unmarshal(data, &entity) + c.Assert(err, chk.IsNil) + + expectedProperties := map[string]interface{}{ + "Address": "Mountain View", + "Age": 23, + "AmountDue": 200.23, + "Binary": []byte("abcd"), + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, 12, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + + c.Assert(entity.OdataMetadata, chk.Equals, 
"https://azuregosdkstoragetests.table.core.windows.net/$metadata#SampleTable/@Element") + c.Assert(entity.OdataType, chk.Equals, "azuregosdkstoragetests.SampleTable") + c.Assert(entity.OdataID, chk.Equals, "https://azuregosdkstoragetests.table.core.windows.net/SampleTable(PartitionKey=''mypartitionkey'',RowKey=''myrowkey'')") + c.Assert(entity.OdataEtag, chk.Equals, "W/\"datetime''2017-01-27T01%3A01%3A44.151805Z''\"") + c.Assert(entity.OdataEditLink, chk.Equals, "SampleTable(PartitionKey=''mypartitionkey'',RowKey=''myrowkey'')") + c.Assert(entity.PartitionKey, chk.Equals, "mypartitionkey") + c.Assert(entity.RowKey, chk.Equals, "myrowkey") + c.Assert(entity.TimeStamp, chk.Equals, time.Date(2017, 1, 27, 1, 1, 44, 151805000, time.UTC)) + + c.Assert(entity.Properties, chk.HasLen, len(expectedProperties)) + c.Assert(entity.Properties["Address"], chk.Equals, expectedProperties["Address"]) + // Note on Age assertion... Looks like the json unmarshaller thinks all numbers are float64. + c.Assert(entity.Properties["Age"], chk.Equals, float64(expectedProperties["Age"].(int))) + c.Assert(entity.Properties["AmountDue"], chk.Equals, expectedProperties["AmountDue"]) + c.Assert(entity.Properties["Binary"], chk.DeepEquals, expectedProperties["Binary"]) + c.Assert(entity.Properties["CustomerSince"], chk.Equals, expectedProperties["CustomerSince"]) + c.Assert(entity.Properties["IsActive"], chk.Equals, expectedProperties["IsActive"]) + c.Assert(entity.Properties["NumberOfOrders"], chk.Equals, expectedProperties["NumberOfOrders"]) + +} + +func compareEntities(got, expected *Entity, c *chk.C) { + c.Assert(got.PartitionKey, chk.Equals, expected.PartitionKey) + c.Assert(got.RowKey, chk.Equals, expected.RowKey) + c.Assert(got.TimeStamp, chk.Equals, expected.TimeStamp) + + c.Assert(got.OdataEtag, chk.Equals, expected.OdataEtag) + c.Assert(got.OdataType, chk.Equals, expected.OdataType) + c.Assert(got.OdataID, chk.Equals, expected.OdataID) + c.Assert(got.OdataEditLink, chk.Equals, 
expected.OdataEditLink) + + c.Assert(got.Properties, chk.DeepEquals, expected.Properties) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index f679395bde27..238ac6d6d95a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -1,123 +1,75 @@ package storage import ( - "encoding/xml" "errors" "fmt" "io" + "io/ioutil" "net/http" "net/url" "strconv" - "strings" ) -// FileServiceClient contains operations for Microsoft Azure File Service. -type FileServiceClient struct { - client Client -} - -// A Share is an entry in ShareListResponse. -type Share struct { - Name string `xml:"Name"` - Properties ShareProperties `xml:"Properties"` -} - -// A Directory is an entry in DirsAndFilesListResponse. -type Directory struct { - Name string `xml:"Name"` -} +const fourMB = uint64(4194304) +const oneTB = uint64(1099511627776) -// A File is an entry in DirsAndFilesListResponse. +// File represents a file on a share. type File struct { - Name string `xml:"Name"` - Properties FileProperties `xml:"Properties"` + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties FileProperties `xml:"Properties"` + share *Share + FileCopyProperties FileCopyState } -// ShareProperties contains various properties of a share returned from -// various endpoints like ListShares. -type ShareProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` - Quota string `xml:"Quota"` -} - -// DirectoryProperties contains various properties of a directory returned -// from various endpoints like GetDirectoryProperties. -type DirectoryProperties struct { - LastModified string `xml:"Last-Modified"` - Etag string `xml:"Etag"` -} - -// FileProperties contains various properties of a file returned from -// various endpoints like ListDirsAndFiles. 
+// FileProperties contains various properties of a file. type FileProperties struct { - CacheControl string `header:"x-ms-cache-control"` - ContentLength uint64 `xml:"Content-Length"` - ContentType string `header:"x-ms-content-type"` - CopyCompletionTime string - CopyID string - CopySource string - CopyProgress string - CopyStatusDesc string - CopyStatus string - Disposition string `header:"x-ms-content-disposition"` - Encoding string `header:"x-ms-content-encoding"` - Etag string - Language string `header:"x-ms-content-language"` - LastModified string - MD5 string `header:"x-ms-content-md5"` + CacheControl string `header:"x-ms-cache-control"` + Disposition string `header:"x-ms-content-disposition"` + Encoding string `header:"x-ms-content-encoding"` + Etag string + Language string `header:"x-ms-content-language"` + LastModified string + Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` + MD5 string `header:"x-ms-content-md5"` + Type string `header:"x-ms-content-type"` +} + +// FileCopyState contains various properties of a file copy operation. +type FileCopyState struct { + CompletionTime string + ID string `header:"x-ms-copy-id"` + Progress string + Source string + Status string `header:"x-ms-copy-status"` + StatusDesc string } // FileStream contains file data returned from a call to GetFile. type FileStream struct { Body io.ReadCloser - Properties *FileProperties - Metadata map[string]string + ContentMD5 string } -// ShareListResponse contains the response fields from -// ListShares call. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ShareListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Prefix string `xml:"Prefix"` - Marker string `xml:"Marker"` - NextMarker string `xml:"NextMarker"` - MaxResults int64 `xml:"MaxResults"` - Shares []Share `xml:"Shares>Share"` -} - -// ListSharesParameters defines the set of customizable parameters to make a -// List Shares call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx -type ListSharesParameters struct { - Prefix string - Marker string - Include string - MaxResults uint - Timeout uint +// FileRequestOptions will be passed to misc file operations. +// Currently just Timeout (in seconds) but could expand. +type FileRequestOptions struct { + Timeout uint // timeout duration in seconds. } -// DirsAndFilesListResponse contains the response fields from -// a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type DirsAndFilesListResponse struct { - XMLName xml.Name `xml:"EnumerationResults"` - Xmlns string `xml:"xmlns,attr"` - Marker string `xml:"Marker"` - MaxResults int64 `xml:"MaxResults"` - Directories []Directory `xml:"Entries>Directory"` - Files []File `xml:"Entries>File"` - NextMarker string `xml:"NextMarker"` +func prepareOptions(options *FileRequestOptions) url.Values { + params := url.Values{} + if options != nil { + params = addTimeout(params, options.Timeout) + } + return params } // FileRanges contains a list of file range information for a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRanges struct { ContentLength uint64 LastModified string @@ -127,752 +79,384 @@ type FileRanges struct { // FileRange contains range information for a file. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges type FileRange struct { Start uint64 `xml:"Start"` End uint64 `xml:"End"` } -// ListDirsAndFilesParameters defines the set of customizable parameters to -// make a List Files and Directories call. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -type ListDirsAndFilesParameters struct { - Marker string - MaxResults uint - Timeout uint -} - -// ShareHeaders contains various properties of a file and is an entry -// in SetShareProperties -type ShareHeaders struct { - Quota string `header:"x-ms-share-quota"` -} - -type compType string - -const ( - compNone compType = "" - compList compType = "list" - compMetadata compType = "metadata" - compProperties compType = "properties" - compRangeList compType = "rangelist" -) - -func (ct compType) String() string { - return string(ct) -} - -type resourceType string - -const ( - resourceDirectory resourceType = "directory" - resourceFile resourceType = "" - resourceShare resourceType = "share" -) - -func (rt resourceType) String() string { - return string(rt) -} - -func (p ListSharesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Prefix != "" { - out.Set("prefix", p.Prefix) - } - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.Include != "" { - out.Set("include", p.Include) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - -func (p ListDirsAndFilesParameters) getParameters() url.Values { - out := url.Values{} - - if p.Marker != "" { - out.Set("marker", p.Marker) - } - if p.MaxResults != 0 { - out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) - } - if p.Timeout != 0 { - out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) - } - - return out -} - func (fr FileRange) String() 
string { return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) } -// ToPathSegment returns the URL path segment for the specified values -func ToPathSegment(parts ...string) string { - join := strings.Join(parts, "/") - if join[0] != '/' { - join = fmt.Sprintf("/%s", join) - } - return join +// builds the complete file path for this file object +func (f *File) buildPath() string { + return f.parent.buildPath() + "/" + f.Name } -// returns url.Values for the specified types -func getURLInitValues(comp compType, res resourceType) url.Values { - values := url.Values{} - if comp != compNone { - values.Set("comp", comp.String()) - } - if res != resourceFile { - values.Set("restype", res.String()) - } - return values -} - -// ListDirsAndFiles returns a list of files or directories under the specified share or -// directory. It also contains a pagination token and other response details. +// ClearRange releases the specified range of space in a file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx -func (f FileServiceClient) ListDirsAndFiles(path string, params ListDirsAndFilesParameters) (DirsAndFilesListResponse, error) { - q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) - - var out DirsAndFilesListResponse - resp, err := f.listContent(path, q, nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { + var timeout *uint + if options != nil { + timeout = &options.Timeout + } + headers, err := f.modifyRange(nil, fileRange, timeout, nil) if err != nil { - return out, err + return err } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - return out, err + f.updateEtagAndLastModified(headers) + return nil } -// ListFileRanges returns the list of valid ranges for a file. +// Create creates a new file or replaces an existing one. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx -func (f FileServiceClient) ListFileRanges(path string, listRange *FileRange) (FileRanges, error) { - params := url.Values{"comp": {"rangelist"}} - - // add optional range to list - var headers map[string]string - if listRange != nil { - headers = make(map[string]string) - headers["Range"] = listRange.String() +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File +func (f *File) Create(maxSize uint64, options *FileRequestOptions) error { + if maxSize > oneTB { + return fmt.Errorf("max file size is 1TB") } + params := prepareOptions(options) + headers := headersFromStruct(f.Properties) + headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10) + headers["x-ms-type"] = "file" - var out FileRanges - resp, err := f.listContent(path, params, headers) - if err != nil { - return out, err - } - - defer resp.body.Close() - var cl uint64 - cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) + outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated}) if err != nil { - return out, err + return err } - out.ContentLength = cl - out.ETag = resp.headers.Get("ETag") - out.LastModified = resp.headers.Get("Last-Modified") - - err = xmlUnmarshal(resp.body, &out) - return out, err + f.Properties.Length = maxSize + f.updateEtagAndLastModified(outputHeaders) + return nil } -// ListShares returns the list of shares in a storage account along with -// pagination token and other response details. +// CopyFile operation copied a file/blob from the sourceURL to the path provided. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx -func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) { - q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) - - var out ShareListResponse - resp, err := f.listContent("", q, nil) - if err != nil { - return out, err - } - - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) - return out, err -} - -// retrieves directory or share content -func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file +func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { + extraHeaders := map[string]string{ + "x-ms-type": "file", + "x-ms-copy-source": sourceURL, } + params := prepareOptions(options) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodGet, uri, headers, nil) + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) if err != nil { - return nil, err - } - - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - resp.body.Close() - return nil, err + return err } - return resp, nil + f.updateEtagLastModifiedAndCopyHeaders(headers) + return nil } -// CreateDirectory operation creates a new directory with optional metadata in the -// specified share. If a directory with the same name already exists, the operation fails. +// Delete immediately removes this file from the storage account. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (f FileServiceClient) CreateDirectory(path string, metadata map[string]string) error { - return f.createResource(path, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) Delete(options *FileRequestOptions) error { + return f.fsc.deleteResource(f.buildPath(), resourceFile, options) } -// CreateFile operation creates a new file with optional metadata or replaces an existing one. -// Note that this only initializes the file, call PutRange to add content. +// DeleteIfExists removes this file if it exists. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx -func (f FileServiceClient) CreateFile(path string, maxSize uint64, metadata map[string]string) error { - extraHeaders := map[string]string{ - "x-ms-content-length": strconv.FormatUint(maxSize, 10), - "x-ms-type": "file", +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } } - return f.createResource(path, resourceFile, mergeMDIntoExtraHeaders(metadata, extraHeaders)) + return false, err } -// ClearRange releases the specified range of space in storage. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f FileServiceClient) ClearRange(path string, fileRange FileRange) error { - return f.modifyRange(path, nil, fileRange) +// GetFileOptions includes options for a get file operation +type GetFileOptions struct { + Timeout uint + GetContentMD5 bool } -// PutRange writes a range of bytes to a file. Note that the length of bytes must -// match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. +// DownloadToStream operation downloads the file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx -func (f FileServiceClient) PutRange(path string, bytes io.Reader, fileRange FileRange) error { - return f.modifyRange(path, bytes, fileRange) -} - -// modifies a range of bytes in the specified file -func (f FileServiceClient) modifyRange(path string, bytes io.Reader, fileRange FileRange) error { - if err := f.checkForStorageEmulator(); err != nil { - return err - } - if fileRange.End < fileRange.Start { - return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") - } - if bytes != nil && fileRange.End-fileRange.Start > 4194304 { - return errors.New("range cannot exceed 4MB in size") - } - - uri := f.client.getEndpoint(fileServiceName, path, url.Values{"comp": {"range"}}) - - // default to clear - write := "clear" - cl := uint64(0) - - // if bytes is not nil then this is an update operation - if bytes != nil { - write = "update" - cl = (fileRange.End - fileRange.Start) + 1 - } - - extraHeaders := map[string]string{ - "Content-Length": strconv.FormatUint(cl, 10), - "Range": fileRange.String(), - "x-ms-write": write, - } - - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - resp, err := f.client.exec(http.MethodPut, uri, headers, bytes) +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { + params := 
prepareOptions(options) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) if err != nil { - return err + return nil, err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + return resp.body, nil } -// GetFile operation reads or downloads a file from the system, including its -// metadata and properties. +// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. // // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file -func (f FileServiceClient) GetFile(path string, fileRange *FileRange) (*FileStream, error) { - var extraHeaders map[string]string - if fileRange != nil { - extraHeaders = map[string]string{ - "Range": fileRange.String(), +func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { + extraHeaders := map[string]string{ + "Range": fileRange.String(), + } + params := url.Values{} + if options != nil { + if options.GetContentMD5 { + if isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + extraHeaders["x-ms-range-get-content-md5"] = "true" } + params = addTimeout(params, options.Timeout) } - resp, err := f.getResourceNoClose(path, compNone, resourceFile, http.MethodGet, extraHeaders) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) if err != nil { - return nil, err + return fs, err } if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { - resp.body.Close() - return nil, err - } - - props, err := getFileProps(resp.headers) - md := getFileMDFromHeaders(resp.headers) - return &FileStream{Body: resp.body, 
Properties: props, Metadata: md}, nil -} - -// CreateShare operation creates a new share with optional metadata under the specified account. -// If the share with the same name already exists, the operation fails. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShare(name string, metadata map[string]string) error { - return f.createResource(ToPathSegment(name), resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) -} - -// DirectoryExists returns true if the specified directory exists on the specified share. -func (f FileServiceClient) DirectoryExists(path string) (bool, error) { - return f.resourceExists(path, resourceDirectory) -} - -// FileExists returns true if the specified file exists. -func (f FileServiceClient) FileExists(path string) (bool, error) { - return f.resourceExists(path, resourceFile) -} - -// ShareExists returns true if a share with given name exists -// on the storage account, otherwise returns false. -func (f FileServiceClient) ShareExists(name string) (bool, error) { - return f.resourceExists(ToPathSegment(name), resourceShare) -} - -// returns true if the specified directory or share exists -func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, error) { - if err := f.checkForStorageEmulator(); err != nil { - return false, err + readAndCloseBody(resp.body) + return fs, err } - uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) - headers := f.client.getStandardHeaders() - - resp, err := f.client.exec(http.MethodHead, uri, headers, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusOK, nil - } + fs.Body = resp.body + if options != nil && options.GetContentMD5 { + fs.ContentMD5 = resp.headers.Get("Content-MD5") } - return false, err -} - -// GetDirectoryURL gets the canonical URL to the directory with the 
specified name -// in the specified share. This method does not create a publicly accessible URL if -// the file is private and this method does not check if the directory exists. -func (f FileServiceClient) GetDirectoryURL(path string) string { - return f.client.getEndpoint(fileServiceName, path, url.Values{}) -} - -// GetShareURL gets the canonical URL to the share with the specified name in the -// specified container. This method does not create a publicly accessible URL if -// the file is private and this method does not check if the share exists. -func (f FileServiceClient) GetShareURL(name string) string { - return f.client.getEndpoint(fileServiceName, ToPathSegment(name), url.Values{}) + return fs, nil } -// CreateDirectoryIfNotExists creates a new directory on the specified share -// if it does not exist. Returns true if directory is newly created or false -// if the directory already exists. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx -func (f FileServiceClient) CreateDirectoryIfNotExists(path string) (bool, error) { - resp, err := f.createResourceNoClose(path, resourceDirectory, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } +// Exists returns true if this file exists. +func (f *File) Exists() (bool, error) { + exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) + if exists { + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) } - return false, err + return exists, err } -// CreateShareIfNotExists creates a new share under the specified account if -// it does not exist. Returns true if container is newly created or false if -// container already exists. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx -func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { - resp, err := f.createResourceNoClose(ToPathSegment(name), resourceShare, nil) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { - return resp.statusCode == http.StatusCreated, nil - } - } - return false, err -} - -// creates a resource depending on the specified resource type -func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) error { - resp, err := f.createResourceNoClose(path, res, extraHeaders) +// FetchAttributes updates metadata and properties for this file. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties +func (f *File) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) if err != nil { return err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + f.Metadata = getMetadataFromHeaders(headers) + return nil } -// creates a resource depending on the specified resource type, doesn't close the response body -func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err +// returns true if the range is larger than 4MB +func isRangeTooBig(fileRange FileRange) bool { + if fileRange.End-fileRange.Start > fourMB { + return true } - values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return 
f.client.exec(http.MethodPut, uri, headers, nil) + return false } -// GetDirectoryProperties provides various information about the specified directory. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn194272.aspx -func (f FileServiceClient) GetDirectoryProperties(path string) (*DirectoryProperties, error) { - headers, err := f.getResourceHeaders(path, compNone, resourceDirectory, http.MethodHead) - if err != nil { - return nil, err - } - - return &DirectoryProperties{ - LastModified: headers.Get("Last-Modified"), - Etag: headers.Get("Etag"), - }, nil +// ListRangesOptions includes options for a list file ranges operation +type ListRangesOptions struct { + Timeout uint + ListRange *FileRange } -// GetFileProperties provides various information about the specified file. +// ListRanges returns the list of valid ranges for this file. // -// See https://msdn.microsoft.com/en-us/library/azure/dn166971.aspx -func (f FileServiceClient) GetFileProperties(path string) (*FileProperties, error) { - headers, err := f.getResourceHeaders(path, compNone, resourceFile, http.MethodHead) - if err != nil { - return nil, err - } - return getFileProps(headers) -} +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { + params := url.Values{"comp": {"rangelist"}} -// returns file properties from the specified HTTP header -func getFileProps(header http.Header) (*FileProperties, error) { - size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) - if err != nil { - return nil, err + // add optional range to list + var headers map[string]string + if options != nil { + params = addTimeout(params, options.Timeout) + if options.ListRange != nil { + headers = make(map[string]string) + headers["Range"] = options.ListRange.String() + } } - return &FileProperties{ - CacheControl: header.Get("Cache-Control"), - ContentLength: size, - ContentType: 
header.Get("Content-Type"), - CopyCompletionTime: header.Get("x-ms-copy-completion-time"), - CopyID: header.Get("x-ms-copy-id"), - CopyProgress: header.Get("x-ms-copy-progress"), - CopySource: header.Get("x-ms-copy-source"), - CopyStatus: header.Get("x-ms-copy-status"), - CopyStatusDesc: header.Get("x-ms-copy-status-description"), - Disposition: header.Get("Content-Disposition"), - Encoding: header.Get("Content-Encoding"), - Etag: header.Get("ETag"), - Language: header.Get("Content-Language"), - LastModified: header.Get("Last-Modified"), - MD5: header.Get("Content-MD5"), - }, nil -} - -// GetShareProperties provides various information about the specified -// file. See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx -func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) { - headers, err := f.getResourceHeaders(ToPathSegment(name), compNone, resourceShare, http.MethodHead) + resp, err := f.fsc.listContent(f.buildPath(), params, headers) if err != nil { return nil, err } - return &ShareProperties{ - LastModified: headers.Get("Last-Modified"), - Etag: headers.Get("Etag"), - Quota: headers.Get("x-ms-share-quota"), - }, nil -} -// returns HTTP header data for the specified directory or share -func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { - resp, err := f.getResourceNoClose(path, comp, res, verb, nil) + defer resp.body.Close() + var cl uint64 + cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) if err != nil { + ioutil.ReadAll(resp.body) return nil, err } - defer resp.body.Close() - if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err - } + var out FileRanges + out.ContentLength = cl + out.ETag = resp.headers.Get("ETag") + out.LastModified = resp.headers.Get("Last-Modified") - return resp.headers, nil + err = xmlUnmarshal(resp.body, &out) + return &out, err } -// gets the specified 
resource, doesn't close the response body -func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { +// modifies a range of bytes in this file +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { + if err := f.fsc.checkForStorageEmulator(); err != nil { return nil, err } + if fileRange.End < fileRange.Start { + return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if bytes != nil && isRangeTooBig(fileRange) { + return nil, errors.New("range cannot exceed 4MB in size") + } - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - return f.client.exec(verb, uri, headers, nil) -} - -// SetFileProperties operation sets system properties on the specified file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetFileProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx -func (f FileServiceClient) SetFileProperties(path string, props FileProperties) error { - return f.setResourceHeaders(path, compProperties, resourceFile, headersFromStruct(props)) -} - -// SetShareProperties replaces the ShareHeaders for the specified file. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by SetShareProperties. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx -func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error { - return f.setResourceHeaders(ToPathSegment(name), compProperties, resourceShare, headersFromStruct(shareHeaders)) -} + params := url.Values{"comp": {"range"}} + if timeout != nil { + params = addTimeout(params, *timeout) + } -// DeleteDirectory operation removes the specified empty directory. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx -func (f FileServiceClient) DeleteDirectory(path string) error { - return f.deleteResource(path, resourceDirectory) -} + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) -// DeleteFile operation immediately removes the file from the storage account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx -func (f FileServiceClient) DeleteFile(path string) error { - return f.deleteResource(path, resourceFile) -} + // default to clear + write := "clear" + cl := uint64(0) -// DeleteShare operation marks the specified share for deletion. The share -// and any files contained within it are later deleted during garbage -// collection. -// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShare(name string) error { - return f.deleteResource(ToPathSegment(name), resourceShare) -} + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (fileRange.End - fileRange.Start) + 1 + } -// DeleteShareIfExists operation marks the specified share for deletion if it -// exists. The share and any files contained within it are later deleted during -// garbage collection. Returns true if share existed and deleted with this call, -// false otherwise. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx -func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { - resp, err := f.deleteResourceNoClose(ToPathSegment(name), resourceShare) - if resp != nil { - defer resp.body.Close() - if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { - return resp.statusCode == http.StatusAccepted, nil - } + extraHeaders := map[string]string{ + "Content-Length": strconv.FormatUint(cl, 10), + "Range": fileRange.String(), + "x-ms-write": write, } - return false, err -} -// deletes the resource and returns the response -func (f FileServiceClient) deleteResource(path string, res resourceType) error { - resp, err := f.deleteResourceNoClose(path, res) - if err != nil { - return err + if contentMD5 != nil { + extraHeaders["Content-MD5"] = *contentMD5 } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) -} -// deletes the resource and returns the response, doesn't close the response body -func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { - if err := f.checkForStorageEmulator(); err != nil { + headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) + resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) + if err != nil { return nil, err } - - values := getURLInitValues(compNone, res) - uri := f.client.getEndpoint(fileServiceName, path, values) - return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil) -} - -// SetDirectoryMetadata replaces the metadata for the specified directory. -// -// Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetDirectoryMetadata. HTTP header names -// are case-insensitive so case munging should not matter to other -// applications either. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx -func (f FileServiceClient) SetDirectoryMetadata(path string, metadata map[string]string) error { - return f.setResourceHeaders(path, compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated}) } -// SetFileMetadata replaces the metadata for the specified file. +// SetMetadata replaces the metadata for this file. // // Some keys may be converted to Camel-Case before sending. All keys // are returned in lower case by GetFileMetadata. HTTP header names // are case-insensitive so case munging should not matter to other // applications either. // -// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx -func (f FileServiceClient) SetFileMetadata(path string, metadata map[string]string) error { - return f.setResourceHeaders(path, compMetadata, resourceFile, mergeMDIntoExtraHeaders(metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata +func (f *File) SetMetadata(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil } -// SetShareMetadata replaces the metadata for the specified Share. +// SetProperties sets system properties on this file. // // Some keys may be converted to Camel-Case before sending. All keys -// are returned in lower case by GetShareMetadata. HTTP header names +// are returned in lower case by SetFileProperties. HTTP header names // are case-insensitive so case munging should not matter to other // applications either. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string) error { - return f.setResourceHeaders(ToPathSegment(name), compMetadata, resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties +func (f *File) SetProperties(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil } -// merges metadata into extraHeaders and returns extraHeaders -func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { - if metadata == nil && extraHeaders == nil { - return nil - } - if extraHeaders == nil { - extraHeaders = make(map[string]string) - } - for k, v := range metadata { - extraHeaders[userDefinedMetadataHeaderPrefix+k] = v - } - return extraHeaders +// updates Etag and last modified date +func (f *File) updateEtagAndLastModified(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") } -// merges extraHeaders into headers and returns headers -func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { - for k, v := range extraHeaders { - headers[k] = v - } - return headers +// updates Etag, last modified date and x-ms-copy-id +func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") } -// sets extra header data for the specified resource -func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, 
extraHeaders map[string]string) error { - if err := f.checkForStorageEmulator(); err != nil { - return err +// updates file properties from the specified HTTP header +func (f *File) updateProperties(header http.Header) { + size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) + if err == nil { + f.Properties.Length = size } - params := getURLInitValues(comp, res) - uri := f.client.getEndpoint(fileServiceName, path, params) - headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) - - resp, err := f.client.exec(http.MethodPut, uri, headers, nil) - if err != nil { - return err - } - defer resp.body.Close() - - return checkRespCode(resp.statusCode, []int{http.StatusOK}) + f.updateEtagAndLastModified(header) + f.Properties.CacheControl = header.Get("Cache-Control") + f.Properties.Disposition = header.Get("Content-Disposition") + f.Properties.Encoding = header.Get("Content-Encoding") + f.Properties.Language = header.Get("Content-Language") + f.Properties.MD5 = header.Get("Content-MD5") + f.Properties.Type = header.Get("Content-Type") } -// GetDirectoryMetadata returns all user-defined metadata for the specified directory. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) -// -// See https://msdn.microsoft.com/en-us/library/azure/mt427371.aspx -func (f FileServiceClient) GetDirectoryMetadata(path string) (map[string]string, error) { - return f.getMetadata(path, resourceDirectory) +// URL gets the canonical URL to this file. +// This method does not create a publicly accessible URL if the file +// is private and this method does not check if the file exists. +func (f *File) URL() string { + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) } -// GetFileMetadata returns all user-defined metadata for the specified file. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dn689098.aspx -func (f FileServiceClient) GetFileMetadata(path string) (map[string]string, error) { - return f.getMetadata(path, resourceFile) +// WriteRangeOptions includes opptions for a write file range operation +type WriteRangeOptions struct { + Timeout uint + ContentMD5 string } -// GetShareMetadata returns all user-defined metadata for the specified share. -// -// All metadata keys will be returned in lower case. (HTTP header -// names are case-insensitive.) +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside +// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with +// a maximum size of 4MB. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx -func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) { - return f.getMetadata(ToPathSegment(name), resourceShare) -} - -// gets metadata for the specified resource -func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { - if err := f.checkForStorageEmulator(); err != nil { - return nil, err +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") } - - headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) - if err != nil { - return nil, err + var timeout *uint + var md5 *string + if options != nil { + timeout = &options.Timeout + md5 = &options.ContentMD5 } - return getFileMDFromHeaders(headers), nil -} - -// returns a map of custom metadata values from the specified HTTP header -func getFileMDFromHeaders(header http.Header) map[string]string { - metadata := make(map[string]string) - for k, v := range header { - // Can't trust CanonicalHeaderKey() to munge case - 
// reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["foo"] = content of the last X-Ms-Meta-Foo header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] + headers, err := f.modifyRange(bytes, fileRange, timeout, md5) + if err != nil { + return err } - return metadata -} -//checkForStorageEmulator determines if the client is setup for use with -//Azure Storage Emulator, and returns a relevant error -func (f FileServiceClient) checkForStorageEmulator() error { - if f.client.accountName == StorageEmulatorAccountName { - return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") - } + f.updateEtagAndLastModified(headers) return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go index c0558e5497e7..4993f302ff27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go @@ -2,9 +2,9 @@ package storage import ( "bytes" + "crypto/md5" + "encoding/base64" "io" - "math/rand" - "strconv" chk "gopkg.in/check.v1" ) @@ -13,359 +13,141 @@ type StorageFileSuite struct{} var _ = chk.Suite(&StorageFileSuite{}) -func getFileClient(c *chk.C) FileServiceClient { - return getBasicClient(c).GetFileService() -} - -func (s *StorageFileSuite) Test_pathSegments(c *chk.C) { - c.Assert(ToPathSegment("foo"), chk.Equals, "/foo") - 
c.Assert(ToPathSegment("foo", "bar"), chk.Equals, "/foo/bar") - c.Assert(ToPathSegment("foo", "bar", "baz"), chk.Equals, "/foo/bar/baz") -} - -func (s *StorageFileSuite) TestGetURL(c *chk.C) { - api, err := NewBasicClient("foo", "YmFy") - c.Assert(err, chk.IsNil) - cli := api.GetFileService() - - c.Assert(cli.GetShareURL("share"), chk.Equals, "https://foo.file.core.windows.net/share") - c.Assert(cli.GetDirectoryURL("share/dir"), chk.Equals, "https://foo.file.core.windows.net/share/dir") -} - -func (s *StorageFileSuite) TestCreateShareDeleteShare(c *chk.C) { - cli := getFileClient(c) - name := randShare() - c.Assert(cli.CreateShare(name, nil), chk.IsNil) - c.Assert(cli.DeleteShare(name), chk.IsNil) -} - -func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) { - cli := getFileClient(c) - name := randShare() - defer cli.DeleteShare(name) - - // First create - ok, err := cli.CreateShareIfNotExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) - - // Second create, should not give errors - ok, err = cli.CreateShareIfNotExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) -} - -func (s *StorageFileSuite) TestDeleteShareIfNotExists(c *chk.C) { - cli := getFileClient(c) - name := randShare() - - // delete non-existing share - ok, err := cli.DeleteShareIfExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - c.Assert(cli.CreateShare(name, nil), chk.IsNil) - - // delete existing share - ok, err = cli.DeleteShareIfExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageFileSuite) Test_checkForStorageEmulator(c *chk.C) { - f := getEmulatorClient(c).GetFileService() - err := f.checkForStorageEmulator() - c.Assert(err, chk.NotNil) -} - -func (s *StorageFileSuite) TestListShares(c *chk.C) { - cli := getFileClient(c) - c.Assert(deleteTestShares(cli), chk.IsNil) - - name := randShare() - - c.Assert(cli.CreateShare(name, nil), chk.IsNil) - defer cli.DeleteShare(name) - - 
resp, err := cli.ListShares(ListSharesParameters{ - MaxResults: 5, - Prefix: testSharePrefix}) - c.Assert(err, chk.IsNil) - - c.Check(len(resp.Shares), chk.Equals, 1) - c.Check(resp.Shares[0].Name, chk.Equals, name) - -} - -func (s *StorageFileSuite) TestShareExists(c *chk.C) { - cli := getFileClient(c) - name := randShare() - - ok, err := cli.ShareExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, false) - - c.Assert(cli.CreateShare(name, nil), chk.IsNil) - defer cli.DeleteShare(name) - - ok, err = cli.ShareExists(name) - c.Assert(err, chk.IsNil) - c.Assert(ok, chk.Equals, true) -} - -func (s *StorageFileSuite) TestGetAndSetShareProperties(c *chk.C) { - name := randShare() - quota := rand.Intn(5120) - - cli := getFileClient(c) - c.Assert(cli.CreateShare(name, nil), chk.IsNil) - defer cli.DeleteShare(name) - - err := cli.SetShareProperties(name, ShareHeaders{Quota: strconv.Itoa(quota)}) - c.Assert(err, chk.IsNil) - - props, err := cli.GetShareProperties(name) - c.Assert(err, chk.IsNil) - - c.Assert(props.Quota, chk.Equals, strconv.Itoa(quota)) -} - -func (s *StorageFileSuite) TestGetAndSetShareMetadata(c *chk.C) { - cli := getFileClient(c) - share1 := randShare() - - c.Assert(cli.CreateShare(share1, nil), chk.IsNil) - defer cli.DeleteShare(share1) - - m, err := cli.GetShareMetadata(share1) - c.Assert(err, chk.IsNil) - c.Assert(m, chk.Not(chk.Equals), nil) - c.Assert(len(m), chk.Equals, 0) - - share2 := randShare() - mCreate := map[string]string{ - "create": "data", - } - c.Assert(cli.CreateShare(share2, mCreate), chk.IsNil) - defer cli.DeleteShare(share2) - - m, err = cli.GetShareMetadata(share2) - c.Assert(err, chk.IsNil) - c.Assert(m, chk.Not(chk.Equals), nil) - c.Assert(len(m), chk.Equals, 1) - - mPut := map[string]string{ - "foo": "bar", - "bar_baz": "waz qux", - } - - err = cli.SetShareMetadata(share2, mPut) - c.Assert(err, chk.IsNil) - - m, err = cli.GetShareMetadata(share2) - c.Assert(err, chk.IsNil) - c.Check(m, chk.DeepEquals, mPut) - - // 
Case munging - - mPutUpper := map[string]string{ - "Foo": "different bar", - "bar_BAZ": "different waz qux", - } - mExpectLower := map[string]string{ - "foo": "different bar", - "bar_baz": "different waz qux", - } - - err = cli.SetShareMetadata(share2, mPutUpper) - c.Assert(err, chk.IsNil) - - m, err = cli.GetShareMetadata(share2) - c.Assert(err, chk.IsNil) - c.Check(m, chk.DeepEquals, mExpectLower) -} - -func (s *StorageFileSuite) TestListDirsAndFiles(c *chk.C) { - // create share - cli := getFileClient(c) - share := randShare() - - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) - - // list contents, should be empty - resp, err := cli.ListDirsAndFiles(share, ListDirsAndFilesParameters{}) - c.Assert(err, chk.IsNil) - c.Assert(resp.Directories, chk.IsNil) - c.Assert(resp.Files, chk.IsNil) - - // create a directory and a file - dir := "SomeDirectory" - file := "foo.file" - c.Assert(cli.CreateDirectory(ToPathSegment(share, dir), nil), chk.IsNil) - c.Assert(cli.CreateFile(ToPathSegment(share, file), 512, nil), chk.IsNil) - - // list contents - resp, err = cli.ListDirsAndFiles(share, ListDirsAndFilesParameters{}) - c.Assert(err, chk.IsNil) - c.Assert(len(resp.Directories), chk.Equals, 1) - c.Assert(len(resp.Files), chk.Equals, 1) - c.Assert(resp.Directories[0].Name, chk.Equals, dir) - c.Assert(resp.Files[0].Name, chk.Equals, file) -} - -func (s *StorageFileSuite) TestCreateDirectory(c *chk.C) { - // create share - cli := getFileClient(c) - share := randShare() - - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) - - // directory shouldn't exist - dir := ToPathSegment(share, "SomeDirectory") - exists, err := cli.DirectoryExists(dir) - c.Assert(err, chk.IsNil) - c.Assert(exists, chk.Equals, false) - - // create directory - exists, err = cli.CreateDirectoryIfNotExists(dir) - c.Assert(err, chk.IsNil) - c.Assert(exists, chk.Equals, true) - - // try to create again, should fail - c.Assert(cli.CreateDirectory(dir, 
nil), chk.NotNil) - exists, err = cli.CreateDirectoryIfNotExists(dir) - c.Assert(err, chk.IsNil) - c.Assert(exists, chk.Equals, false) - - // get properties - var props *DirectoryProperties - props, err = cli.GetDirectoryProperties(dir) - c.Assert(props.Etag, chk.Not(chk.Equals), "") - c.Assert(props.LastModified, chk.Not(chk.Equals), "") - - // delete directory and verify - c.Assert(cli.DeleteDirectory(dir), chk.IsNil) - exists, err = cli.DirectoryExists(dir) - c.Assert(err, chk.IsNil) - c.Assert(exists, chk.Equals, false) -} - func (s *StorageFileSuite) TestCreateFile(c *chk.C) { - // create share cli := getFileClient(c) - share := randShare() + cli.deleteAllShares() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() // create directory structure - dir1 := ToPathSegment(share, "one") - c.Assert(cli.CreateDirectory(dir1, nil), chk.IsNil) - dir2 := ToPathSegment(dir1, "two") - c.Assert(cli.CreateDirectory(dir2, nil), chk.IsNil) + dir1 := root.GetDirectoryReference("one") + c.Assert(dir1.Create(nil), chk.IsNil) + dir2 := dir1.GetDirectoryReference("two") + c.Assert(dir2.Create(nil), chk.IsNil) // verify file doesn't exist - file := ToPathSegment(dir2, "some.file") - exists, err := cli.FileExists(file) + file := dir2.GetFileReference("some.file") + exists, err := file.Exists() c.Assert(err, chk.IsNil) c.Assert(exists, chk.Equals, false) // create file - c.Assert(cli.CreateFile(file, 1024, nil), chk.IsNil) - exists, err = cli.FileExists(file) - c.Assert(err, chk.IsNil) - c.Assert(exists, chk.Equals, true) + c.Assert(file.Create(1024, nil), chk.IsNil) // delete file and verify - c.Assert(cli.DeleteFile(file), chk.IsNil) - exists, err = cli.FileExists(file) + c.Assert(file.Delete(nil), chk.IsNil) + exists, err 
= file.Exists() c.Assert(err, chk.IsNil) c.Assert(exists, chk.Equals, false) } func (s *StorageFileSuite) TestGetFile(c *chk.C) { - // create share cli := getFileClient(c) - share := randShare() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() // create file const size = uint64(1024) - file := ToPathSegment(share, "some.file") - c.Assert(cli.CreateFile(file, size, nil), chk.IsNil) + byteStream, _ := newByteStream(size) + file := root.GetFileReference("some.file") + c.Assert(file.Create(size, nil), chk.IsNil) // fill file with some data - c.Assert(cli.PutRange(file, newByteStream(size), FileRange{End: size - 1}), chk.IsNil) + c.Assert(file.WriteRange(byteStream, FileRange{End: size - 1}, nil), chk.IsNil) // set some metadata md := map[string]string{ "something": "somethingvalue", "another": "anothervalue", } - c.Assert(cli.SetFileMetadata(file, md), chk.IsNil) + file.Metadata = md + c.Assert(file.SetMetadata(nil), chk.IsNil) + options := GetFileOptions{ + GetContentMD5: false, + } // retrieve full file content and verify - stream, err := cli.GetFile(file, nil) + stream, err := file.DownloadRangeToStream(FileRange{Start: 0, End: size - 1}, &options) c.Assert(err, chk.IsNil) defer stream.Body.Close() var b1 [size]byte count, _ := stream.Body.Read(b1[:]) c.Assert(count, chk.Equals, int(size)) var c1 [size]byte - newByteStream(size).Read(c1[:]) + bs, _ := newByteStream(size) + bs.Read(c1[:]) c.Assert(b1, chk.DeepEquals, c1) - c.Assert(stream.Properties.ContentLength, chk.Equals, size) - c.Assert(stream.Metadata, chk.DeepEquals, md) // retrieve partial file content and verify - stream, err = cli.GetFile(file, &FileRange{Start: size / 2, End: size - 1}) + stream, err = 
file.DownloadRangeToStream(FileRange{Start: size / 2, End: size - 1}, &options) c.Assert(err, chk.IsNil) defer stream.Body.Close() var b2 [size / 2]byte count, _ = stream.Body.Read(b2[:]) c.Assert(count, chk.Equals, int(size)/2) var c2 [size / 2]byte - newByteStream(size / 2).Read(c2[:]) + bs, _ = newByteStream(size / 2) + bs.Read(c2[:]) c.Assert(b2, chk.DeepEquals, c2) } func (s *StorageFileSuite) TestFileRanges(c *chk.C) { - // create share cli := getFileClient(c) - share := randShare() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() - // create file fileSize := uint64(4096) - file := ToPathSegment(share, "test.dat") - c.Assert(cli.CreateFile(file, fileSize, nil), chk.IsNil) + contentBytes := content(int(fileSize)) + + // --- File with no valid ranges + file1 := root.GetFileReference("file1.txt") + c.Assert(file1.Create(fileSize, nil), chk.IsNil) - // verify there are no valid ranges - ranges, err := cli.ListFileRanges(file, nil) + ranges, err := file1.ListRanges(nil) c.Assert(err, chk.IsNil) c.Assert(ranges.ContentLength, chk.Equals, fileSize) c.Assert(ranges.FileRanges, chk.IsNil) - // fill entire range and validate - c.Assert(cli.PutRange(file, newByteStream(fileSize), FileRange{End: fileSize - 1}), chk.IsNil) - ranges, err = cli.ListFileRanges(file, nil) + // --- File after writing a range + file2 := root.GetFileReference("file2.txt") + c.Assert(file2.Create(fileSize, nil), chk.IsNil) + c.Assert(file2.WriteRange(bytes.NewReader(contentBytes), FileRange{End: fileSize - 1}, nil), chk.IsNil) + + ranges, err = file2.ListRanges(nil) c.Assert(err, chk.IsNil) c.Assert(len(ranges.FileRanges), chk.Equals, 1) c.Assert((ranges.FileRanges[0].End-ranges.FileRanges[0].Start)+1, chk.Equals, fileSize) - // clear entire range and 
validate - c.Assert(cli.ClearRange(file, FileRange{End: fileSize - 1}), chk.IsNil) - ranges, err = cli.ListFileRanges(file, nil) + // --- File after writing and clearing + file3 := root.GetFileReference("file3.txt") + c.Assert(file3.Create(fileSize, nil), chk.IsNil) + c.Assert(file3.WriteRange(bytes.NewReader(contentBytes), FileRange{End: fileSize - 1}, nil), chk.IsNil) + c.Assert(file3.ClearRange(FileRange{End: fileSize - 1}, nil), chk.IsNil) + + ranges, err = file3.ListRanges(nil) c.Assert(err, chk.IsNil) c.Assert(ranges.FileRanges, chk.IsNil) - // put partial ranges on 512 byte aligned boundaries + // --- File with ranges and subranges + file4 := root.GetFileReference("file4.txt") + c.Assert(file4.Create(fileSize, nil), chk.IsNil) putRanges := []FileRange{ {End: 511}, {Start: 1024, End: 1535}, @@ -374,46 +156,62 @@ func (s *StorageFileSuite) TestFileRanges(c *chk.C) { } for _, r := range putRanges { - err = cli.PutRange(file, newByteStream(512), r) + err = file4.WriteRange(bytes.NewReader(contentBytes[:512]), r, nil) c.Assert(err, chk.IsNil) } // validate all ranges - ranges, err = cli.ListFileRanges(file, nil) + ranges, err = file4.ListRanges(nil) c.Assert(err, chk.IsNil) c.Assert(ranges.FileRanges, chk.DeepEquals, putRanges) + options := ListRangesOptions{ + ListRange: &FileRange{ + Start: 1000, + End: 3000, + }, + } // validate sub-ranges - ranges, err = cli.ListFileRanges(file, &FileRange{Start: 1000, End: 3000}) + ranges, err = file4.ListRanges(&options) c.Assert(err, chk.IsNil) c.Assert(ranges.FileRanges, chk.DeepEquals, putRanges[1:3]) - // clear partial range and validate - c.Assert(cli.ClearRange(file, putRanges[0]), chk.IsNil) - c.Assert(cli.ClearRange(file, putRanges[2]), chk.IsNil) - ranges, err = cli.ListFileRanges(file, nil) + // --- clear partial range and validate + file5 := root.GetFileReference("file5.txt") + c.Assert(file5.Create(fileSize, nil), chk.IsNil) + c.Assert(file5.WriteRange(bytes.NewReader(contentBytes), FileRange{End: fileSize - 1}, 
nil), chk.IsNil) + c.Assert(file5.ClearRange(putRanges[0], nil), chk.IsNil) + c.Assert(file5.ClearRange(putRanges[2], nil), chk.IsNil) + + ranges, err = file5.ListRanges(nil) c.Assert(err, chk.IsNil) - c.Assert(len(ranges.FileRanges), chk.Equals, 2) - c.Assert(ranges.FileRanges[0], chk.DeepEquals, putRanges[1]) - c.Assert(ranges.FileRanges[1], chk.DeepEquals, putRanges[3]) + expectedtRanges := []FileRange{ + {Start: 512, End: 2047}, + {Start: 2560, End: 4095}, + } + c.Assert(ranges.FileRanges, chk.HasLen, 2) + c.Assert(ranges.FileRanges[0], chk.DeepEquals, expectedtRanges[0]) + c.Assert(ranges.FileRanges[1], chk.DeepEquals, expectedtRanges[1]) } func (s *StorageFileSuite) TestFileProperties(c *chk.C) { - // create share cli := getFileClient(c) - share := randShare() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() fileSize := uint64(512) - file := ToPathSegment(share, "test.dat") - c.Assert(cli.CreateFile(file, fileSize, nil), chk.IsNil) + file := root.GetFileReference("test.dat") + c.Assert(file.Create(fileSize, nil), chk.IsNil) // get initial set of properties - props, err := cli.GetFileProperties(file) - c.Assert(err, chk.IsNil) - c.Assert(props.ContentLength, chk.Equals, fileSize) + c.Assert(file.Properties.Length, chk.Equals, fileSize) + c.Assert(file.Properties.Etag, chk.NotNil) // set some file properties cc := "cachecontrol" @@ -421,135 +219,185 @@ func (s *StorageFileSuite) TestFileProperties(c *chk.C) { enc := "noencoding" lang := "neutral" disp := "friendly" - props.CacheControl = cc - props.ContentType = ct - props.Disposition = disp - props.Encoding = enc - props.Language = lang - c.Assert(cli.SetFileProperties(file, *props), chk.IsNil) + file.Properties.CacheControl = cc + 
file.Properties.Type = ct + file.Properties.Disposition = disp + file.Properties.Encoding = enc + file.Properties.Language = lang + c.Assert(file.SetProperties(nil), chk.IsNil) // retrieve and verify - props, err = cli.GetFileProperties(file) - c.Assert(err, chk.IsNil) - c.Assert(props.CacheControl, chk.Equals, cc) - c.Assert(props.ContentType, chk.Equals, ct) - c.Assert(props.Disposition, chk.Equals, disp) - c.Assert(props.Encoding, chk.Equals, enc) - c.Assert(props.Language, chk.Equals, lang) + c.Assert(file.FetchAttributes(nil), chk.IsNil) + c.Assert(file.Properties.CacheControl, chk.Equals, cc) + c.Assert(file.Properties.Type, chk.Equals, ct) + c.Assert(file.Properties.Disposition, chk.Equals, disp) + c.Assert(file.Properties.Encoding, chk.Equals, enc) + c.Assert(file.Properties.Language, chk.Equals, lang) } -func (s *StorageFileSuite) TestDirectoryMetadata(c *chk.C) { - // create share +func (s *StorageFileSuite) TestFileMetadata(c *chk.C) { cli := getFileClient(c) - share := randShare() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() - dir1 := ToPathSegment(share, "testdir1") - c.Assert(cli.CreateDirectory(dir1, nil), chk.IsNil) + fileSize := uint64(512) + file := root.GetFileReference("test.dat") + c.Assert(file.Create(fileSize, nil), chk.IsNil) // get metadata, shouldn't be any - md, err := cli.GetDirectoryMetadata(dir1) - c.Assert(err, chk.IsNil) - c.Assert(md, chk.HasLen, 0) - - mCreate := map[string]string{ - "create": "data", - } - dir2 := ToPathSegment(share, "testdir2") - c.Assert(cli.CreateDirectory(dir2, mCreate), chk.IsNil) - - // get metadata - md, err = cli.GetDirectoryMetadata(dir2) - c.Assert(err, chk.IsNil) - c.Assert(md, chk.HasLen, 1) + c.Assert(file.Metadata, chk.HasLen, 0) // 
set some custom metadata - md = map[string]string{ + md := map[string]string{ "something": "somethingvalue", "another": "anothervalue", } - c.Assert(cli.SetDirectoryMetadata(dir2, md), chk.IsNil) + file.Metadata = md + c.Assert(file.SetMetadata(nil), chk.IsNil) // retrieve and verify - var mdRes map[string]string - mdRes, err = cli.GetDirectoryMetadata(dir2) - c.Assert(err, chk.IsNil) - c.Assert(mdRes, chk.DeepEquals, md) + c.Assert(file.FetchAttributes(nil), chk.IsNil) + c.Assert(file.Metadata, chk.DeepEquals, md) } -func (s *StorageFileSuite) TestFileMetadata(c *chk.C) { - // create share +func (s *StorageFileSuite) TestFileMD5(c *chk.C) { cli := getFileClient(c) - share := randShare() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - c.Assert(cli.CreateShare(share, nil), chk.IsNil) - defer cli.DeleteShare(share) + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() - fileSize := uint64(512) - file1 := ToPathSegment(share, "test1.dat") - c.Assert(cli.CreateFile(file1, fileSize, nil), chk.IsNil) + // create file + const size = uint64(1024) + fileSize := uint64(size) + file := root.GetFileReference("test.dat") + c.Assert(file.Create(fileSize, nil), chk.IsNil) + + // fill file with some data and MD5 hash + byteStream, contentMD5 := newByteStream(size) + options := WriteRangeOptions{ + ContentMD5: contentMD5, + } + c.Assert(file.WriteRange(byteStream, FileRange{End: size - 1}, &options), chk.IsNil) - // get metadata, shouldn't be any - md, err := cli.GetFileMetadata(file1) + // download file and verify + downloadOptions := GetFileOptions{ + GetContentMD5: true, + } + stream, err := file.DownloadRangeToStream(FileRange{Start: 0, End: size - 1}, &downloadOptions) c.Assert(err, chk.IsNil) - c.Assert(md, chk.HasLen, 0) + defer stream.Body.Close() + c.Assert(stream.ContentMD5, chk.Equals, contentMD5) +} - mCreate := map[string]string{ - 
"create": "data", +// returns a byte stream along with a base-64 encoded MD5 hash of its contents +func newByteStream(count uint64) (io.Reader, string) { + b := make([]uint8, count) + for i := uint64(0); i < count; i++ { + b[i] = 0xff } - file2 := ToPathSegment(share, "test2.dat") - c.Assert(cli.CreateFile(file2, fileSize, mCreate), chk.IsNil) - // get metadata - md, err = cli.GetFileMetadata(file2) + // create an MD5 hash of the array + hash := md5.Sum(b) + + return bytes.NewReader(b), base64.StdEncoding.EncodeToString(hash[:]) +} + +func (s *StorageFileSuite) TestCopyFileSameAccountNoMetaData(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() + + // create directory structure + dir1 := root.GetDirectoryReference("one") + c.Assert(dir1.Create(nil), chk.IsNil) + dir2 := dir1.GetDirectoryReference("two") + c.Assert(dir2.Create(nil), chk.IsNil) + + // create file + file := dir2.GetFileReference("some.file") + c.Assert(file.Create(1024, nil), chk.IsNil) + exists, err := file.Exists() c.Assert(err, chk.IsNil) - c.Assert(md, chk.HasLen, 1) + c.Assert(exists, chk.Equals, true) - // set some custom metadata - md = map[string]string{ - "something": "somethingvalue", - "another": "anothervalue", - } - c.Assert(cli.SetFileMetadata(file2, md), chk.IsNil) + otherFile := dir2.GetFileReference("someother.file") - // retrieve and verify - var mdRes map[string]string - mdRes, err = cli.GetFileMetadata(file2) + // copy the file, no timeout parameter + err = otherFile.CopyFile(file.URL(), nil) c.Assert(err, chk.IsNil) - c.Assert(mdRes, chk.DeepEquals, md) -} -func deleteTestShares(cli FileServiceClient) error { - for { - resp, err := cli.ListShares(ListSharesParameters{Prefix: testSharePrefix}) - if err != nil { - return err - } - if len(resp.Shares) == 0 { - break 
- } - for _, c := range resp.Shares { - err = cli.DeleteShare(c.Name) - if err != nil { - return err - } - } - } - return nil + // delete files + c.Assert(file.Delete(nil), chk.IsNil) + c.Assert(otherFile.Delete(nil), chk.IsNil) } -const testSharePrefix = "zzzzztest" +func (s *StorageFileSuite) TestCopyFileSameAccountTimeout(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() + + // create directory structure + dir1 := root.GetDirectoryReference("one") + c.Assert(dir1.Create(nil), chk.IsNil) + dir2 := dir1.GetDirectoryReference("two") + c.Assert(dir2.Create(nil), chk.IsNil) -func randShare() string { - return testSharePrefix + randString(32-len(testSharePrefix)) + // create file + file := dir2.GetFileReference("some.file") + c.Assert(file.Create(1024, nil), chk.IsNil) + + // copy the file, 60 second timeout. 
+ otherFile := dir2.GetFileReference("someother.file") + options := FileRequestOptions{} + options.Timeout = 60 + c.Assert(otherFile.CopyFile(file.URL(), &options), chk.IsNil) + + // delete files + c.Assert(file.Delete(nil), chk.IsNil) + c.Assert(otherFile.Delete(nil), chk.IsNil) } -func newByteStream(count uint64) io.Reader { - b := make([]uint8, count) - for i := uint64(0); i < count; i++ { - b[i] = 0xff - } - return bytes.NewReader(b) +func (s *StorageFileSuite) TestCopyFileMissingFile(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // create share + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + root := share.GetRootDirectoryReference() + + // create directory structure + dir1 := root.GetDirectoryReference("one") + c.Assert(dir1.Create(nil), chk.IsNil) + + otherFile := dir1.GetFileReference("someother.file") + + // copy the file, no timeout parameter + err := otherFile.CopyFile("", nil) + c.Assert(err, chk.NotNil) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go new file mode 100644 index 000000000000..81217bdfa80b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -0,0 +1,324 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client + auth authentication +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// ShareListResponse contains the response fields from +// ListShares call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +type compType string + +const ( + compNone compType = "" + compList compType = "list" + compMetadata compType = "metadata" + compProperties compType = "properties" + compRangeList compType = "rangelist" +) + +func (ct compType) String() string { + return string(ct) +} + +type resourceType string + +const ( + resourceDirectory resourceType = "directory" + resourceFile resourceType = "" + resourceShare resourceType = "share" +) + +func (rt resourceType) String() string { + return string(rt) +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func (p ListDirsAndFilesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + out = addTimeout(out, p.Timeout) + + return out 
+} + +// returns url.Values for the specified types +func getURLInitValues(comp compType, res resourceType) url.Values { + values := url.Values{} + if comp != compNone { + values.Set("comp", comp.String()) + } + if res != resourceFile { + values.Set("restype", res.String()) + } + return values +} + +// GetShareReference returns a Share object for the specified share name. +func (f *FileServiceClient) GetShareReference(name string) *Share { + return &Share{ + fsc: f, + Name: name, + Properties: ShareProperties{ + Quota: -1, + }, + } +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares +func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + + var out ShareListResponse + resp, err := f.listContent("", q, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + + // assign our client to the newly created Share objects + for i := range out.Shares { + out.Shares[i].fsc = &f + } + return &out, err +} + +// GetServiceProperties gets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties +func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return f.client.getServiceProperties(fileServiceName, f.auth) +} + +// SetServiceProperties sets the properties of your storage account's file service. 
+// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties +func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { + return f.client.setServiceProperties(props, fileServiceName, f.auth) +} + +// retrieves directory or share content +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + readAndCloseBody(resp.body) + return nil, err + } + + return resp, nil +} + +// returns true if the specified resource exists +func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return false, nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, resp.headers, nil + } + } + return false, nil, err +} + +// creates a resource depending on the specified resource type +func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { + resp, err := f.createResourceNoClose(path, res, 
urlParams, extraHeaders) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes) +} + +// creates a resource depending on the specified resource type, doesn't close the response body +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + combinedParams := mergeParams(values, urlParams) + uri := f.client.getEndpoint(fileServiceName, path, combinedParams) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) +} + +// returns HTTP header data for the specified directory or share +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// gets the specified resource, doesn't close the response body +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params = mergeParams(params, getURLInitValues(comp, res)) + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(verb, uri, headers, nil, f.auth) +} + +// deletes the resource and 
returns the response +func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { + resp, err := f.deleteResourceNoClose(path, res, options) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// deletes the resource and returns the response, doesn't close the response body +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) + uri := f.client.getEndpoint(fileServiceName, path, values) + return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) +} + +// merges metadata into extraHeaders and returns extraHeaders +func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { + if metadata == nil && extraHeaders == nil { + return nil + } + if extraHeaders == nil { + extraHeaders = make(map[string]string) + } + for k, v := range metadata { + extraHeaders[userDefinedMetadataHeaderPrefix+k] = v + } + return extraHeaders +} + +// sets extra header data for the specified resource +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + defer 
readAndCloseBody(resp.body) + + return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +//checkForStorageEmulator determines if the client is setup for use with +//Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go new file mode 100644 index 000000000000..415b740183b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -0,0 +1,187 @@ +package storage + +import ( + "errors" + "net/http" + "net/url" + "strconv" + "time" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + headerLeaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// leasePut is common PUT code for the various acquire/release/break etc functions. 
+func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// LeaseOptions includes options for all operations regarding leasing blobs +type LeaseOptions struct { + Timeout uint + Origin string `header:"Origin"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AcquireLease creates a lease for a blob +// returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = acquireLease + + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. 
+ } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 + } + + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob +// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(headers, options) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
+// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(headers, options) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob +// Returns the new LeaseID acquired +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[headerLeaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) 
ReleaseLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob_test.go new file mode 100644 index 000000000000..cd4528be750b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob_test.go @@ -0,0 +1,211 @@ +package storage + +import chk "gopkg.in/check.v1" + +type LeaseBlobSuite struct{} + +var _ = chk.Suite(&LeaseBlobSuite{}) + +func (s *LeaseBlobSuite) TestAcquireLeaseWithNoProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + _, err := b.AcquireLease(30, "", nil) + c.Assert(err, chk.IsNil) +} + +func (s *LeaseBlobSuite) TestAcquireLeaseWithProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + 
c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + c.Assert(leaseID, chk.Equals, proposedLeaseID) +} + +func (s *LeaseBlobSuite) TestAcquireLeaseWithBadProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + proposedLeaseID := "badbadbad" + _, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.NotNil) +} + +func (s *LeaseBlobSuite) TestAcquireInfiniteLease(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + _, err := b.AcquireLease(-1, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) +} + +func (s *LeaseBlobSuite) TestRenewLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + err = b.RenewLease(leaseID, nil) + c.Assert(err, chk.IsNil) +} + +func (s *LeaseBlobSuite) TestRenewLeaseAgainstNoCurrentLease(c *chk.C) { + cli := getBlobClient(c) + rec := 
cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + badLeaseID := "Golang rocks on Azure" + err := b.RenewLease(badLeaseID, nil) + c.Assert(err, chk.NotNil) +} + +func (s *LeaseBlobSuite) TestChangeLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + newProposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fbb" + newLeaseID, err := b.ChangeLease(leaseID, newProposedLeaseID, nil) + c.Assert(err, chk.IsNil) + c.Assert(newLeaseID, chk.Equals, newProposedLeaseID) +} + +func (s *LeaseBlobSuite) TestChangeLeaseNotSuccessfulbadProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + newProposedLeaseID := "1f812371-a41d-49e6-b123-f4b542e" + _, err = b.ChangeLease(leaseID, newProposedLeaseID, nil) + c.Assert(err, chk.NotNil) +} + +func (s *LeaseBlobSuite) TestReleaseLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := 
cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + err = b.ReleaseLease(leaseID, nil) + c.Assert(err, chk.IsNil) +} + +func (s *LeaseBlobSuite) TestReleaseLeaseNotSuccessfulBadLeaseID(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + _, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + err = b.ReleaseLease("badleaseid", nil) + c.Assert(err, chk.NotNil) +} + +func (s *LeaseBlobSuite) TestBreakLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + c.Assert(b.putSingleBlockBlob([]byte("Hello!")), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + _, err := b.AcquireLease(30, proposedLeaseID, nil) + c.Assert(err, chk.IsNil) + + _, err = b.BreakLease(nil) + c.Assert(err, chk.IsNil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go new file mode 100644 index 000000000000..3ededcd421aa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -0,0 +1,153 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "time" +) + +// 
Message represents an Azure message. +type Message struct { + Queue *Queue + Text string `xml:"MessageText"` + ID string `xml:"MessageId"` + Insertion TimeRFC1123 `xml:"InsertionTime"` + Expiration TimeRFC1123 `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + NextVisible TimeRFC1123 `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` +} + +func (m *Message) buildPath() string { + return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) +} + +// PutMessageOptions is the set of options can be specified for Put Messsage +// operation. A zero struct does not use any preferences for the request. +type PutMessageOptions struct { + Timeout uint + VisibilityTimeout int + MessageTTL int + RequestID string `header:"x-ms-client-request-id"` +} + +// Put operation adds a new message to the back of the message queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message +func (m *Message) Put(options *PutMessageOptions) error { + query := url.Values{} + headers := m.Queue.qsc.client.getStandardHeaders() + + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + if options.MessageTTL != 0 { + query.Set("messagettl", strconv.Itoa(options.MessageTTL)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) + resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + err = xmlUnmarshal(resp.body, m) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + 
+// UpdateMessageOptions is the set of options can be specified for Update Messsage +// operation. A zero struct does not use any preferences for the request. +type UpdateMessageOptions struct { + Timeout uint + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +// Update operation updates the specified message. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message +func (m *Message) Update(options *UpdateMessageOptions) error { + query := url.Values{} + if m.PopReceipt != "" { + query.Set("popreceipt", m.PopReceipt) + } + + headers := m.Queue.qsc.client.getStandardHeaders() + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) + + resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + m.PopReceipt = resp.headers.Get("x-ms-popreceipt") + nextTimeStr := resp.headers.Get("x-ms-time-next-visible") + if nextTimeStr != "" { + nextTime, err := time.Parse(time.RFC1123, nextTimeStr) + if err != nil { + return err + } + m.NextVisible = TimeRFC1123(nextTime) + } + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// Delete operation deletes the specified message. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (m *Message) Delete(options *QueueServiceOptions) error { + params := url.Values{"popreceipt": {m.PopReceipt}} + headers := m.Queue.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) + + resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message_test.go new file mode 100644 index 000000000000..ea3e675a0c6e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message_test.go @@ -0,0 +1,79 @@ +package storage + +import chk "gopkg.in/check.v1" + +type StorageMessageSuite struct{} + +var _ = chk.Suite(&StorageMessageSuite{}) + +func (s *StorageMessageSuite) Test_pathForMessage(c *chk.C) { + m := getQueueClient(c).GetQueueReference("q").GetMessageReference("m") + m.ID = "ID" + c.Assert(m.buildPath(), chk.Equals, "/q/messages/ID") +} + +func (s *StorageMessageSuite) TestDeleteMessages(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + q := cli.GetQueueReference(queueName(c)) + c.Assert(q.Create(nil), chk.IsNil) + defer q.Delete(nil) + + m := q.GetMessageReference("message") + c.Assert(m.Put(nil), chk.IsNil) + + options := GetMessagesOptions{ + VisibilityTimeout: 1, + } + list, err := q.GetMessages(&options) + c.Assert(err, chk.IsNil) + c.Assert(list, chk.HasLen, 1) + + c.Assert(list[0].Delete(nil), chk.IsNil) +} + 
+func (s *StorageMessageSuite) TestPutMessage_Peek(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue := cli.GetQueueReference(queueName(c)) + c.Assert(queue.Create(nil), chk.IsNil) + defer queue.Delete(nil) + + msg := queue.GetMessageReference(string(content(64 * 1024))) // exercise max length + c.Assert(msg.Put(nil), chk.IsNil) + + list, err := queue.PeekMessages(nil) + c.Assert(err, chk.IsNil) + c.Assert(len(list), chk.Equals, 1) + c.Assert(list[0].Text, chk.Equals, msg.Text) +} + +func (s *StorageMessageSuite) TestPutMessage_Peek_Update_Delete(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue := cli.GetQueueReference(queueName(c)) + c.Assert(queue.Create(nil), chk.IsNil) + defer queue.Delete(nil) + + msg1 := queue.GetMessageReference(string(content(64 * 1024))) // exercise max length + msg2 := queue.GetMessageReference("and other message") + c.Assert(msg1.Put(nil), chk.IsNil) + c.Assert(msg2.Put(nil), chk.IsNil) + + list, err := queue.GetMessages(&GetMessagesOptions{NumOfMessages: 2, VisibilityTimeout: 2}) + c.Assert(err, chk.IsNil) + c.Assert(len(list), chk.Equals, 2) + c.Assert(list[0].Text, chk.Equals, msg1.Text) + c.Assert(list[1].Text, chk.Equals, msg2.Text) + + list[0].Text = "updated message" + c.Assert(list[0].Update(&UpdateMessageOptions{VisibilityTimeout: 2}), chk.IsNil) + + c.Assert(list[1].Delete(nil), chk.IsNil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go new file mode 100644 index 000000000000..41d832e2be11 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -0,0 +1,33 @@ +package storage + +// MetadataLevel determines if operations should return a paylod, +// and it level of detail. 
+type MetadataLevel string + +// This consts are meant to help with Odata supported operations +const ( + OdataTypeSuffix = "@odata.type" + + // Types + + OdataBinary = "Edm.Binary" + OdataDateTime = "Edm.DateTime" + OdataGUID = "Edm.Guid" + OdataInt64 = "Edm.Int64" + + // Query options + + OdataFilter = "$filter" + OdataOrderBy = "$orderby" + OdataTop = "$top" + OdataSkip = "$skip" + OdataCount = "$count" + OdataExpand = "$expand" + OdataSelect = "$select" + OdataSearch = "$search" + + EmptyPayload MetadataLevel = "" + NoMetadata MetadataLevel = "application/json;odata=nometadata" + MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" + FullMetadata MetadataLevel = "application/json;odata=fullmetadata" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go new file mode 100644 index 000000000000..bc5b398d3ff8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -0,0 +1,189 @@ +package storage + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// GetPageRangesResponse contains the response fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// PutPageOptions includes the options for a put page operation +type PutPageOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` + IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` + IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// WriteRange writes a range of pages to a page blob. +// Ranges must be aligned with 512-byte boundaries and chunk must be of size +// multiplies by 512. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + return b.modifyRange(blobRange, bytes, options) +} + +// ClearRange clears the given range in a page blob. +// Ranges must be aligned with 512-byte boundaries and chunk must be of size +// multiplies by 512. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page +func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error { + return b.modifyRange(blobRange, nil, options) +} + +func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error { + if blobRange.End < blobRange.Start { + return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if blobRange.Start%512 != 0 { + return errors.New("the value for rangeStart must be a modulus of 512") + } + if blobRange.End%512 != 511 { + return errors.New("the value for rangeEnd must be a modulus of 511") + } + + params := url.Values{"comp": {"page"}} + + // default to clear + write := "clear" + var cl uint64 + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (blobRange.End - blobRange.Start) + 1 + } + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = write + headers["x-ms-range"] = blobRange.String() + headers["Content-Length"] = fmt.Sprintf("%v", cl) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRangesOptions includes the options for a get page ranges operation +type GetPageRangesOptions struct { + Timeout uint + Snapshot *time.Time + PreviousSnapshot *time.Time + Range *BlobRange + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPageRanges returns the list of valid page ranges for a page 
blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges +func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { + params := url.Values{"comp": {"pagelist"}} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + if options.PreviousSnapshot != nil { + params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot)) + } + if options.Range != nil { + headers["Range"] = options.Range.String() + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out GetPageRangesResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutPageBlob(options *PutBlobOptions) error { + if b.Properties.ContentLength%512 != 0 { + return errors.New("Content length must be aligned to a 512-byte boundary") + } + + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) + headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob_test.go new file mode 100644 index 000000000000..de009cfe06cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob_test.go @@ -0,0 +1,179 @@ +package storage + +import ( + "bytes" + "io/ioutil" + + chk "gopkg.in/check.v1" +) + +type PageBlobSuite struct{} + +var _ = chk.Suite(&PageBlobSuite{}) + +func (s *PageBlobSuite) TestPutPageBlob(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + size := int64(10 * 1024 * 1024) + 
b.Properties.ContentLength = size + c.Assert(b.PutPageBlob(nil), chk.IsNil) + + // Verify + err := b.GetProperties(nil) + c.Assert(err, chk.IsNil) + c.Assert(b.Properties.ContentLength, chk.Equals, size) + c.Assert(b.Properties.BlobType, chk.Equals, BlobTypePage) +} + +func (s *PageBlobSuite) TestPutPagesUpdate(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + size := int64(10 * 1024 * 1024) // larger than we'll use + b.Properties.ContentLength = size + c.Assert(b.PutPageBlob(nil), chk.IsNil) + + chunk1 := content(1024) + chunk2 := content(512) + + // Append chunks + blobRange := BlobRange{ + End: uint64(len(chunk1) - 1), + } + c.Assert(b.WriteRange(blobRange, bytes.NewReader(chunk1), nil), chk.IsNil) + blobRange.Start = uint64(len(chunk1)) + blobRange.End = uint64(len(chunk1) + len(chunk2) - 1) + c.Assert(b.WriteRange(blobRange, bytes.NewReader(chunk2), nil), chk.IsNil) + + // Verify contents + options := GetBlobRangeOptions{ + Range: &BlobRange{ + End: uint64(len(chunk1) + len(chunk2) - 1), + }, + } + out, err := b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + + // Overwrite first half of chunk1 + chunk0 := content(512) + blobRange.Start = 0 + blobRange.End = uint64(len(chunk0) - 1) + c.Assert(b.WriteRange(blobRange, bytes.NewReader(chunk0), nil), chk.IsNil) + + // Verify contents + out, err = b.GetRange(&options) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) +} + +func (s *PageBlobSuite) TestPutPagesClear(c *chk.C) { + cli := 
getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + b := cnt.GetBlobReference(blobName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + size := int64(10 * 1024 * 1024) // larger than we'll use + b.Properties.ContentLength = size + c.Assert(b.PutPageBlob(nil), chk.IsNil) + + // Put 0-2047 + chunk := content(2048) + blobRange := BlobRange{ + End: 2047, + } + c.Assert(b.WriteRange(blobRange, bytes.NewReader(chunk), nil), chk.IsNil) + + // Clear 512-1023 + blobRange.Start = 512 + blobRange.End = 1023 + c.Assert(b.ClearRange(blobRange, nil), chk.IsNil) + + // Verify contents + options := GetBlobRangeOptions{ + Range: &BlobRange{ + Start: 0, + End: 2047, + }, + } + out, err := b.GetRange(&options) + c.Assert(err, chk.IsNil) + contents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + defer out.Close() + c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) +} + +func (s *PageBlobSuite) TestGetPageRanges(c *chk.C) { + cli := getBlobClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + cnt := cli.GetContainerReference(containerName(c)) + c.Assert(cnt.Create(nil), chk.IsNil) + defer cnt.Delete(nil) + + size := int64(10 * 1024) // larger than we'll use + + // Get page ranges on empty blob + blob1 := cnt.GetBlobReference(blobName(c, "1")) + blob1.Properties.ContentLength = size + c.Assert(blob1.PutPageBlob(nil), chk.IsNil) + out, err := blob1.GetPageRanges(nil) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 0) + + // Get page ranges with just one range + blob2 := cnt.GetBlobReference(blobName(c, "2")) + blob2.Properties.ContentLength = size + c.Assert(blob2.PutPageBlob(nil), chk.IsNil) + blobRange := []BlobRange{ + {End: 511}, + {Start: 1024, End: 2047}, + } + c.Assert(blob2.WriteRange(blobRange[0], bytes.NewReader(content(512)), nil), chk.IsNil) + + out, err = 
blob2.GetPageRanges(nil) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 1) + expected := []PageRange{ + {End: 511}, + {Start: 1024, End: 2047}, + } + c.Assert(out.PageList[0], chk.Equals, expected[0]) + + // Get page ranges with just two range + blob3 := cnt.GetBlobReference(blobName(c, "3")) + blob3.Properties.ContentLength = size + c.Assert(blob3.PutPageBlob(nil), chk.IsNil) + for _, br := range blobRange { + c.Assert(blob3.WriteRange(br, bytes.NewReader(content(int(br.End-br.Start+1))), nil), chk.IsNil) + } + out, err = blob3.GetPageRanges(nil) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 2) + c.Assert(out.PageList, chk.DeepEquals, expected) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go index 0cd3578442ea..c2c7f742c452 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -2,166 +2,139 @@ package storage import ( "encoding/xml" + "errors" "fmt" + "io" "net/http" "net/url" "strconv" - "strings" + "time" ) const ( // casing is per Golang's http.Header canonicalizing the header names. - approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" - userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" ) -// QueueServiceClient contains operations for Microsoft Azure Queue Storage -// Service. -type QueueServiceClient struct { - client Client +// QueueAccessPolicy represents each access policy in the queue ACL. 
+type QueueAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAdd bool + CanUpdate bool + CanProcess bool } -func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } -func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } -func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } - -type putMessageRequest struct { - XMLName xml.Name `xml:"QueueMessage"` - MessageText string `xml:"MessageText"` +// QueuePermissions represents the queue ACLs. +type QueuePermissions struct { + AccessPolicies []QueueAccessPolicy } -// PutMessageParameters is the set of options can be specified for Put Messsage -// operation. A zero struct does not use any preferences for the request. -type PutMessageParameters struct { - VisibilityTimeout int - MessageTTL int +// SetQueuePermissionOptions includes options for a set queue permissions operation +type SetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -func (p PutMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - if p.MessageTTL != 0 { - out.Set("messagettl", strconv.Itoa(p.MessageTTL)) - } - return out +// Queue represents an Azure queue. +type Queue struct { + qsc *QueueServiceClient + Name string + Metadata map[string]string + AproxMessageCount uint64 } -// GetMessagesParameters is the set of options can be specified for Get -// Messsages operation. A zero struct does not use any preferences for the -// request. 
-type GetMessagesParameters struct { - NumOfMessages int - VisibilityTimeout int +func (q *Queue) buildPath() string { + return fmt.Sprintf("/%s", q.Name) } -func (p GetMessagesParameters) getParameters() url.Values { - out := url.Values{} - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) - } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) - } - return out +func (q *Queue) buildPathMessages() string { + return fmt.Sprintf("%s/messages", q.buildPath()) } -// PeekMessagesParameters is the set of options can be specified for Peek -// Messsage operation. A zero struct does not use any preferences for the -// request. -type PeekMessagesParameters struct { - NumOfMessages int +// QueueServiceOptions includes options for some queue service operations +type QueueServiceOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` } -func (p PeekMessagesParameters) getParameters() url.Values { - out := url.Values{"peekonly": {"true"}} // Required for peek operation - if p.NumOfMessages != 0 { - out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) +// Create operation creates a queue under the given account. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 +func (q *Queue) Create(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } - return out -} + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) -// UpdateMessageParameters is the set of options can be specified for Update Messsage -// operation. A zero struct does not use any preferences for the request. 
-type UpdateMessageParameters struct { - PopReceipt string - VisibilityTimeout int + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } -func (p UpdateMessageParameters) getParameters() url.Values { - out := url.Values{} - if p.PopReceipt != "" { - out.Set("popreceipt", p.PopReceipt) +// Delete operation permanently deletes the specified queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 +func (q *Queue) Delete(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } - if p.VisibilityTimeout != 0 { - out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err } - return out -} - -// GetMessagesResponse represents a response returned from Get Messages -// operation. -type GetMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` -} - -// GetMessageResponse represents a QueueMessage object returned from Get -// Messages operation response. -type GetMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible string `xml:"TimeNextVisible"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// PeekMessagesResponse represents a response returned from Get Messages -// operation. 
-type PeekMessagesResponse struct { - XMLName xml.Name `xml:"QueueMessagesList"` - QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` -} - -// PeekMessageResponse represents a QueueMessage object returned from Peek -// Messages operation response. -type PeekMessageResponse struct { - MessageID string `xml:"MessageId"` - InsertionTime string `xml:"InsertionTime"` - ExpirationTime string `xml:"ExpirationTime"` - DequeueCount int `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } -// QueueMetadataResponse represents user defined metadata and queue -// properties on a specific queue. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx -type QueueMetadataResponse struct { - ApproximateMessageCount int - UserDefinedMetadata map[string]string +// Exists returns true if a queue with given name exists. +func (q *Queue) Exists() (bool, error) { + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err } // SetMetadata operation sets user-defined metadata on the specified queue. // Metadata is associated with the queue as name-value pairs. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx -func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - headers := c.client.getStandardHeaders() - for k, v := range metadata { - headers[userDefinedMetadataHeaderPrefix+k] = v +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata +func (q *Queue) SetMetadata(options *QueueServiceOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) - resp, err := c.client.exec("PUT", uri, headers, nil) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) if err != nil { return err } - defer resp.body.Close() - + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } @@ -169,176 +142,286 @@ func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) // properties on the specified queue. Metadata is associated with // the queue as name-values pairs. // -// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata // // Because the way Golang's http client (and http.Header in particular) // canonicalize header names, the returned metadata names would always // be all lower case. 
-func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { - qm := QueueMetadataResponse{} - qm.UserDefinedMetadata = make(map[string]string) - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec("GET", uri, headers, nil) - if err != nil { - return qm, err +func (q *Queue) GetMetadata(options *QueueServiceOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) } - defer resp.body.Close() + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) - for k, v := range resp.headers { - if len(v) != 1 { - return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) - } + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) - value := v[0] + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } - if k == approximateMessagesCountHeader { - qm.ApproximateMessageCount, err = strconv.Atoi(value) - if err != nil { - return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) - } - } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { - name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) - qm.UserDefinedMetadata[strings.ToLower(name)] = value + aproxMessagesStr := resp.headers.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader)) + if aproxMessagesStr != "" { + aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64) + if err != nil { + return err } + q.AproxMessageCount = aproxMessages } - return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) + q.Metadata = 
getMetadataFromHeaders(resp.headers) + return nil } -// CreateQueue operation creates a queue under the given account. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx -func (c QueueServiceClient) CreateQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - headers := c.client.getStandardHeaders() - resp, err := c.client.exec("PUT", uri, headers, nil) - if err != nil { - return err +// GetMessageReference returns a message object with the specified text. +func (q *Queue) GetMessageReference(text string) *Message { + return &Message{ + Queue: q, + Text: text, } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) } -// DeleteQueue operation permanently deletes the specified queue. +// GetMessagesOptions is the set of options can be specified for Get +// Messsages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesOptions struct { + Timeout uint + NumOfMessages int + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +type messages struct { + XMLName xml.Name `xml:"QueueMessagesList"` + Messages []Message `xml:"QueueMessage"` +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx -func (c QueueServiceClient) DeleteQueue(name string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages +func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) { + query := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + if options.NumOfMessages != 0 { + query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) + } + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) + + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) if err != nil { - return err + return []Message{}, err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) -} + defer readAndCloseBody(resp.body) -// QueueExists returns true if a queue with given name exists. 
-func (c QueueServiceClient) QueueExists(name string) (bool, error) { - uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { - return resp.statusCode == http.StatusOK, nil + var out messages + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return []Message{}, err + } + for i := range out.Messages { + out.Messages[i].Queue = q } + return out.Messages, err +} - return false, err +// PeekMessagesOptions is the set of options can be specified for Peek +// Messsage operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesOptions struct { + Timeout uint + NumOfMessages int + RequestID string `header:"x-ms-client-request-id"` } -// PutMessage operation adds a new message to the back of the message queue. +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx -func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages +func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) { + query := url.Values{"peekonly": {"true"}} // Required for peek operation + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + if options.NumOfMessages != 0 { + query.Set("numofmessages", strconv.Itoa(options.NumOfMessages)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query) + + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) if err != nil { - return err + return []Message{}, err } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = strconv.Itoa(nn) - resp, err := c.client.exec("POST", uri, headers, body) + defer readAndCloseBody(resp.body) + + var out messages + err = xmlUnmarshal(resp.body, &out) if err != nil { - return err + return []Message{}, err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusCreated}) + for i := range out.Messages { + out.Messages[i].Queue = q + } + return out.Messages, err } // ClearMessages operation deletes all messages from the specified queue. 
// -// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx -func (c QueueServiceClient) ClearMessages(queue string) error { - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages +func (q *Queue) ClearMessages(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) + + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) if err != nil { return err } - defer resp.body.Close() + readAndCloseBody(resp.body) return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) } -// GetMessages operation retrieves one or more messages from the front of the -// queue. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx -func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { - var r GetMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) +// SetPermissions sets up queue permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl +func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { + body, length, err := generateQueueACLpayload(permissions.AccessPolicies) if err != nil { - return r, err + return err } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} -// PeekMessages retrieves one or more messages from the front of the queue, but -// does not alter the visibility of the message. -// -// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx -func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { - var r PeekMessagesResponse - uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) - resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) - if err != nil { - return r, err + params := url.Values{ + "comp": {"acl"}, } - defer resp.body.Close() - err = xmlUnmarshal(resp.body, &r) - return r, err -} + headers := q.qsc.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) -// DeleteMessage operation deletes the specified message. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx -func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ - "popreceipt": {popReceipt}}) - resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) if err != nil { return err } - defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + defer readAndCloseBody(resp.body) + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return errors.New("Unable to set permissions") + } + + return nil } -// UpdateMessage operation deletes the specified message. 
-// -// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx -func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error { - uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters()) - req := putMessageRequest{MessageText: message} - body, nn, err := xmlMarshal(req) - if err != nil { - return err +func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, qapd := range policies { + permission := qapd.generateQueuePermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { + // generate the permissions string (raup). + // still want the end user API to have bool flags. 
+ permissions = "" + + if qapd.CanRead { + permissions += "r" + } + + if qapd.CanAdd { + permissions += "a" + } + + if qapd.CanUpdate { + permissions += "u" + } + + if qapd.CanProcess { + permissions += "p" + } + + return permissions +} + +// GetQueuePermissionOptions includes options for a get queue permissions operation +type GetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl +// If timeout is 0 then it will not be passed to Azure +func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) { + params := url.Values{ + "comp": {"acl"}, } - headers := c.client.getStandardHeaders() - headers["Content-Length"] = fmt.Sprintf("%d", nn) - resp, err := c.client.exec("PUT", uri, headers, body) + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth) if err != nil { - return err + return nil, err } defer resp.body.Close() - return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) + + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildQueueAccessPolicy(ap, &resp.headers), nil +} + +func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions { + permissions := QueuePermissions{ + AccessPolicies: []QueueAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + qapd := QueueAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + qapd.CanRead = 
updatePermissions(policy.AccessPolicy.Permission, "r") + qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a") + qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p") + + permissions.AccessPolicies = append(permissions.AccessPolicies, qapd) + } + return &permissions } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go index 45a8901d6beb..dbed7cacd726 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go @@ -10,133 +10,352 @@ type StorageQueueSuite struct{} var _ = chk.Suite(&StorageQueueSuite{}) -func getQueueClient(c *chk.C) QueueServiceClient { - return getBasicClient(c).GetQueueService() +func getQueueClient(c *chk.C) *QueueServiceClient { + cli := getBasicClient(c).GetQueueService() + return &cli } func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { - c.Assert(pathForQueue("q"), chk.Equals, "/q") + c.Assert(getQueueClient(c). + GetQueueReference("q"). + buildPath(), chk.Equals, "/q") } func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { - c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") -} - -func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { - c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") + c.Assert(getQueueClient(c). + GetQueueReference("q"). 
+ buildPathMessages(), chk.Equals, "/q/messages") } func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { cli := getQueueClient(c) - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - c.Assert(cli.DeleteQueue(name), chk.IsNil) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + q := cli.GetQueueReference(queueName(c)) + c.Assert(q.Create(nil), chk.IsNil) + c.Assert(q.Delete(nil), chk.IsNil) } func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) { cli := getQueueClient(c) - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - defer cli.DeleteQueue(name) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - qm, err := cli.GetMetadata(name) + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + err := queue1.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Assert(qm.ApproximateMessageCount, chk.Equals, 0) + c.Assert(queue1.AproxMessageCount, chk.Equals, uint64(0)) + queue2 := cli.GetQueueReference(queueName(c, "2")) + c.Assert(queue2.Create(nil), chk.IsNil) + defer queue2.Delete(nil) for ix := 0; ix < 3; ix++ { - err = cli.PutMessage(name, "foobar", PutMessageParameters{}) + msg := queue2.GetMessageReference("lolrofl") + err = msg.Put(nil) c.Assert(err, chk.IsNil) } time.Sleep(1 * time.Second) - qm, err = cli.GetMetadata(name) + err = queue2.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Assert(qm.ApproximateMessageCount, chk.Equals, 3) + c.Assert(queue2.AproxMessageCount, chk.Equals, uint64(3)) } func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) { cli := getQueueClient(c) - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - defer cli.DeleteQueue(name) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) metadata := make(map[string]string) - 
metadata["Foo1"] = "bar1" - metadata["fooBaz"] = "bar" - err := cli.SetMetadata(name, metadata) + metadata["Lol1"] = "rofl1" + metadata["lolBaz"] = "rofl" + queue1.Metadata = metadata + err := queue1.SetMetadata(nil) c.Assert(err, chk.IsNil) - qm, err := cli.GetMetadata(name) + err = queue1.GetMetadata(nil) c.Assert(err, chk.IsNil) - c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1") - c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar") + c.Assert(queue1.Metadata["lol1"], chk.Equals, metadata["Lol1"]) + c.Assert(queue1.Metadata["lolbaz"], chk.Equals, metadata["lolBaz"]) } func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { cli := getQueueClient(c) - ok, err := cli.QueueExists("nonexistent-queue") + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "nonexistent")) + ok, err := queue1.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, false) - name := randString(20) - c.Assert(cli.CreateQueue(name), chk.IsNil) - defer cli.DeleteQueue(name) + queue2 := cli.GetQueueReference(queueName(c, "exisiting")) + c.Assert(queue2.Create(nil), chk.IsNil) + defer queue2.Delete(nil) - ok, err = cli.QueueExists(name) + ok, err = queue2.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, true) } -func (s *StorageQueueSuite) TestPutMessage_PeekMessage_UpdateMessage_DeleteMessage(c *chk.C) { - q := randString(20) +func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue := cli.GetQueueReference(queueName(c)) + c.Assert(queue.Create(nil), chk.IsNil) + defer queue.Delete(nil) + + msg := queue.GetMessageReference("message") + n := 4 + for i := 0; i < n; i++ { + c.Assert(msg.Put(nil), chk.IsNil) + } - msg := randString(64 * 1024) // exercise max length - c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) - r, err := 
cli.PeekMessages(q, PeekMessagesParameters{}) + list, err := queue.GetMessages(&GetMessagesOptions{NumOfMessages: n}) c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, 1) - c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) + c.Assert(len(list), chk.Equals, n) +} - gr, gerr := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 2}) - c.Assert(gerr, chk.IsNil) +func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue := cli.GetQueueReference(queueName(c)) + c.Assert(queue.Create(nil), chk.IsNil) + defer queue.Delete(nil) - updatedMsg := "Test Message" - c.Assert(cli.UpdateMessage(q, r.QueueMessagesList[0].MessageID, updatedMsg, - UpdateMessageParameters{PopReceipt: gr.QueueMessagesList[0].PopReceipt, VisibilityTimeout: 2}), chk.IsNil) - r, err = cli.PeekMessages(q, PeekMessagesParameters{}) + msg := queue.GetMessageReference("message") + c.Assert(msg.Put(nil), chk.IsNil) + list, err := queue.GetMessages(&GetMessagesOptions{VisibilityTimeout: 1}) c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, 0) + c.Assert(len(list), chk.Equals, 1) + msg = &(list[0]) + c.Assert(msg.Delete(nil), chk.IsNil) } -func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { - q := randString(20) +func queueName(c *chk.C, extras ...string) string { + // 63 is the max len for shares + return nameGenerator(63, "queue-", alphanum, c, extras) +} + +func (s *StorageQueueSuite) Test_SetPermissionsAllTrueNoTimeout(c *chk.C) { cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - n := 4 - for i := 0; i < n; i++ { - c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := 
QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + CanRead: true, + CanAdd: true, + CanUpdate: true, + CanProcess: true, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + err := queue1.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageQueueSuite) Test_SetPermissionsAllTrueWithTimeout(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + CanRead: true, + CanAdd: true, + CanUpdate: true, + CanProcess: true, } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) - r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) + options := SetQueuePermissionOptions{Timeout: 30} + err := queue1.SetPermissions(perms, &options) c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, n) + } -func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { - q := randString(20) +func (s *StorageQueueSuite) Test_SetPermissionsAlternateTrueNoTimeout(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + ExpiryTime: time.Date(2051, 
time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + CanRead: true, + CanAdd: false, + CanUpdate: true, + CanProcess: false, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + err := queue1.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) +} + +func (s *StorageQueueSuite) Test_SetPermissionsAlternateTrueWithTimeout(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)), + CanRead: true, + CanAdd: false, + CanUpdate: true, + CanProcess: false, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + + options := SetQueuePermissionOptions{Timeout: 30} + err := queue1.SetPermissions(perms, &options) + c.Assert(err, chk.IsNil) +} + +func (s *StorageQueueSuite) Test_GetPermissionsAllTrueNoTimeout(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.UTC), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.UTC), + CanRead: true, + CanAdd: true, + CanUpdate: true, + CanProcess: true, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + err := queue1.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) + + returnedPerms, err := queue1.GetPermissions(nil) + c.Assert(err, chk.IsNil) + c.Assert(returnedPerms.AccessPolicies, chk.HasLen, 1) + + 
c.Assert(returnedPerms.AccessPolicies[0].CanRead, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanAdd, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanUpdate, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanProcess, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].ID, chk.Equals, "GolangRocksOnAzure") + c.Assert(returnedPerms.AccessPolicies[0].StartTime, chk.Equals, qapd.StartTime) + c.Assert(returnedPerms.AccessPolicies[0].ExpiryTime, chk.Equals, qapd.ExpiryTime) +} + +func (s *StorageQueueSuite) Test_GetPermissionsAllTrueWithTimeout(c *chk.C) { + cli := getQueueClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) + + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.UTC), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.UTC), + CanRead: true, + CanAdd: true, + CanUpdate: true, + CanProcess: true, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + err := queue1.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) + + options := GetQueuePermissionOptions{Timeout: 30} + returnedPerms, err := queue1.GetPermissions(&options) + c.Assert(err, chk.IsNil) + c.Assert(returnedPerms.AccessPolicies, chk.HasLen, 1) + + c.Assert(returnedPerms.AccessPolicies[0].CanRead, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanAdd, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanUpdate, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanProcess, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].ID, chk.Equals, "GolangRocksOnAzure") + c.Assert(returnedPerms.AccessPolicies[0].StartTime, chk.Equals, qapd.StartTime) + c.Assert(returnedPerms.AccessPolicies[0].ExpiryTime, chk.Equals, qapd.ExpiryTime) + +} + 
+func (s *StorageQueueSuite) Test_GetPermissionsAlternateTrueNoTimeout(c *chk.C) { cli := getQueueClient(c) - c.Assert(cli.CreateQueue(q), chk.IsNil) - defer cli.DeleteQueue(q) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + queue1 := cli.GetQueueReference(queueName(c, "1")) + c.Assert(queue1.Create(nil), chk.IsNil) + defer queue1.Delete(nil) - c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) - r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) + perms := QueuePermissions{} + qapd := QueueAccessPolicy{ + ID: "GolangRocksOnAzure", + StartTime: time.Date(2050, time.December, 20, 21, 55, 0, 0, time.UTC), + ExpiryTime: time.Date(2051, time.December, 20, 21, 55, 0, 0, time.UTC), + CanRead: true, + CanAdd: false, + CanUpdate: true, + CanProcess: false, + } + perms.AccessPolicies = append(perms.AccessPolicies, qapd) + err := queue1.SetPermissions(perms, nil) + c.Assert(err, chk.IsNil) + + returnedPerms, err := queue1.GetPermissions(nil) c.Assert(err, chk.IsNil) - c.Assert(len(r.QueueMessagesList), chk.Equals, 1) - m := r.QueueMessagesList[0] - c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) + c.Assert(returnedPerms.AccessPolicies, chk.HasLen, 1) + + c.Assert(returnedPerms.AccessPolicies[0].CanRead, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanAdd, chk.Equals, false) + c.Assert(returnedPerms.AccessPolicies[0].CanUpdate, chk.Equals, true) + c.Assert(returnedPerms.AccessPolicies[0].CanProcess, chk.Equals, false) + c.Assert(returnedPerms.AccessPolicies[0].ID, chk.Equals, "GolangRocksOnAzure") + c.Assert(returnedPerms.AccessPolicies[0].StartTime, chk.Equals, qapd.StartTime) + c.Assert(returnedPerms.AccessPolicies[0].ExpiryTime, chk.Equals, qapd.ExpiryTime) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go new file mode 100644 index 000000000000..19b44941c8cc --- 
/dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go @@ -0,0 +1,28 @@ +package storage + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties +func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return q.client.getServiceProperties(queueServiceName, q.auth) +} + +// SetServiceProperties sets the properties of your storage account's queue service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties +func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error { + return q.client.setServiceProperties(props, queueServiceName, q.auth) +} + +// GetQueueReference returns a Container object for the specified queue name. +func (q *QueueServiceClient) GetQueueReference(name string) *Queue { + return &Queue{ + qsc: q, + Name: name, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go new file mode 100644 index 000000000000..e6a868081a01 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go @@ -0,0 +1,202 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" + "strconv" +) + +// Share represents an Azure file share. +type Share struct { + fsc *FileServiceClient + Name string `xml:"Name"` + Properties ShareProperties `xml:"Properties"` + Metadata map[string]string +} + +// ShareProperties contains various properties of a share. +type ShareProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + Quota int `xml:"Quota"` +} + +// builds the complete path for this share object. 
+func (s *Share) buildPath() string { + return fmt.Sprintf("/%s", s.Name) +} + +// Create this share under the associated account. +// If a share with the same name already exists, the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) Create(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this share under the associated account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + if resp.statusCode == http.StatusCreated { + s.updateEtagAndLastModified(resp.headers) + return true, nil + } + return false, s.FetchAttributes(nil) + } + } + + return false, err +} + +// Delete marks this share for deletion. The share along with any files +// and directories contained within it are later deleted during garbage +// collection. 
If the share does not exist the operation fails +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) Delete(options *FileRequestOptions) error { + return s.fsc.deleteResource(s.buildPath(), resourceShare, options) +} + +// DeleteIfExists operation marks this share for deletion if it exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) + if resp != nil { + defer readAndCloseBody(resp.body) + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this share already exists +// on the storage account, otherwise returns false. +func (s *Share) Exists() (bool, error) { + exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) + if exists { + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata and properties for this share. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties +func (s *Share) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + s.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetRootDirectoryReference returns a Directory object at the root of this share. +func (s *Share) GetRootDirectoryReference() *Directory { + return &Directory{ + fsc: s.fsc, + share: s, + } +} + +// ServiceClient returns the FileServiceClient associated with this share. 
+func (s *Share) ServiceClient() *FileServiceClient { + return s.fsc +} + +// SetMetadata replaces the metadata for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata +func (s *Share) SetMetadata(options *FileRequestOptions) error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties +func (s *Share) SetProperties(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + if s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (s *Share) updateEtagAndLastModified(headers http.Header) { + s.Properties.Etag = headers.Get("Etag") + s.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates quota value +func (s *Share) updateQuota(headers http.Header) { + quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) + if err == nil { + s.Properties.Quota = quota + } +} + +// URL gets the canonical URL to this share. This method does not create a publicly accessible +// URL if the share is private and this method does not check if the share exists. 
+func (s *Share) URL() string { + return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share_test.go new file mode 100644 index 000000000000..2e207bdc9d5c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share_test.go @@ -0,0 +1,207 @@ +package storage + +import chk "gopkg.in/check.v1" + +type StorageShareSuite struct{} + +var _ = chk.Suite(&StorageShareSuite{}) + +func getFileClient(c *chk.C) FileServiceClient { + return getBasicClient(c).GetFileService() +} + +func (s *StorageShareSuite) TestCreateShareDeleteShare(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + share := cli.GetShareReference(shareName(c)) + c.Assert(share.Create(nil), chk.IsNil) + c.Assert(share.Delete(nil), chk.IsNil) +} + +func (s *StorageShareSuite) TestCreateShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // Create non existing + share := cli.GetShareReference(shareName(c, "notexists")) + ok, err := share.CreateIfNotExists(nil) + defer share.Delete(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + +} + +func (s *StorageShareSuite) TestCreateShareIfExists(c *chk.C) { + cli := getFileClient(c) + share := cli.GetShareReference(shareName(c, "exists")) + share.Create(nil) + defer share.Delete(nil) + + rec := cli.client.appendRecorder(c) + share.fsc = &cli + defer rec.Stop() + + // Try to create exisiting + ok, err := share.CreateIfNotExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageShareSuite) TestDeleteShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // delete non-existing share + share1 := cli.GetShareReference(shareName(c, "1")) + ok, err := share1.DeleteIfExists(nil) + 
c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // delete existing share + share2 := cli.GetShareReference(shareName(c, "2")) + c.Assert(share2.Create(nil), chk.IsNil) + ok, err = share2.DeleteIfExists(nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageShareSuite) TestListShares(c *chk.C) { + cli := getFileClient(c) + cli.deleteAllShares() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + name := shareName(c) + share := cli.GetShareReference(name) + + c.Assert(share.Create(nil), chk.IsNil) + + resp, err := cli.ListShares(ListSharesParameters{ + MaxResults: 5, + }) + c.Assert(err, chk.IsNil) + + c.Check(len(resp.Shares), chk.Equals, 1) + c.Check(resp.Shares[0].Name, chk.Equals, name) + + // clean up via the retrieved share object + resp.Shares[0].Delete(nil) +} + +func (s *StorageShareSuite) TestShareExists(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + // Share does not exist + share1 := cli.GetShareReference(shareName(c, "1")) + ok, err := share1.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Share exists + share2 := cli.GetShareReference(shareName(c, "2")) + c.Assert(share2.Create(nil), chk.IsNil) + defer share1.Delete(nil) + ok, err = share2.Exists() + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageShareSuite) TestGetAndSetShareProperties(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + share := cli.GetShareReference(shareName(c)) + quota := 55 + + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + c.Assert(share.Properties.LastModified, chk.Not(chk.Equals), "") + + share.Properties.Quota = quota + err := share.SetProperties(nil) + c.Assert(err, chk.IsNil) + + err = share.FetchAttributes(nil) + c.Assert(err, chk.IsNil) + + c.Assert(share.Properties.Quota, chk.Equals, quota) +} + +func (s *StorageShareSuite) 
TestGetAndSetShareMetadata(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + share1 := cli.GetShareReference(shareName(c, "1")) + + c.Assert(share1.Create(nil), chk.IsNil) + defer share1.Delete(nil) + + // by default there should be no metadata + c.Assert(share1.Metadata, chk.IsNil) + c.Assert(share1.FetchAttributes(nil), chk.IsNil) + c.Assert(share1.Metadata, chk.IsNil) + + share2 := cli.GetShareReference(shareName(c, "2")) + c.Assert(share2.Create(nil), chk.IsNil) + defer share2.Delete(nil) + + c.Assert(share2.Metadata, chk.IsNil) + + mPut := map[string]string{ + "lol": "rofl", + "rofl_baz": "waz qux", + } + + share2.Metadata = mPut + c.Assert(share2.SetMetadata(nil), chk.IsNil) + c.Check(share2.Metadata, chk.DeepEquals, mPut) + + c.Assert(share2.FetchAttributes(nil), chk.IsNil) + c.Check(share2.Metadata, chk.DeepEquals, mPut) +} + +func (s *StorageShareSuite) TestMetadataCaseMunging(c *chk.C) { + cli := getFileClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + share := cli.GetShareReference(shareName(c)) + + c.Assert(share.Create(nil), chk.IsNil) + defer share.Delete(nil) + + mPutUpper := map[string]string{ + "Lol": "different rofl", + "rofl_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "lol": "different rofl", + "rofl_baz": "different waz qux", + } + + share.Metadata = mPutUpper + c.Assert(share.SetMetadata(nil), chk.IsNil) + + c.Check(share.Metadata, chk.DeepEquals, mPutUpper) + c.Assert(share.FetchAttributes(nil), chk.IsNil) + c.Check(share.Metadata, chk.DeepEquals, mExpectLower) +} + +func (cli *FileServiceClient) deleteAllShares() { + resp, _ := cli.ListShares(ListSharesParameters{}) + if resp != nil && len(resp.Shares) > 0 { + for _, sh := range resp.Shares { + share := cli.GetShareReference(sh.Name) + share.Delete(nil) + } + } +} + +func shareName(c *chk.C, extras ...string) string { + return nameGenerator(63, "share-", alphanum, c, extras) +} diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go new file mode 100644 index 000000000000..bee1c31ad61f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go @@ -0,0 +1,47 @@ +package storage + +import ( + "strings" + "time" +) + +// AccessPolicyDetailsXML has specifics about an access policy +// annotated with XML details. +type AccessPolicyDetailsXML struct { + StartTime time.Time `xml:"Start"` + ExpiryTime time.Time `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// SignedIdentifier is a wrapper for a specific policy +type SignedIdentifier struct { + ID string `xml:"Id"` + AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` +} + +// SignedIdentifiers part of the response from GetPermissions call. +type SignedIdentifiers struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// AccessPolicy is the response type from the GetPermissions call. +type AccessPolicy struct { + SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` +} + +// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the +// AccessPolicy struct which will get converted to XML. 
+func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier { + return SignedIdentifier{ + ID: id, + AccessPolicy: AccessPolicyDetailsXML{ + StartTime: startTime.UTC().Round(time.Second), + ExpiryTime: expiryTime.UTC().Round(time.Second), + Permission: permissions, + }, + } +} + +func updatePermissions(permissions, permission string) bool { + return strings.Contains(permissions, permission) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go new file mode 100644 index 000000000000..88700fbc93e6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go @@ -0,0 +1,117 @@ +package storage + +import ( + "net/http" + "net/url" + "strconv" +) + +// ServiceProperties represents the storage account service properties +type ServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors +} + +// Logging represents the Azure Analytics Logging settings +type Logging struct { + Version string + Delete bool + Read bool + Write bool + RetentionPolicy *RetentionPolicy +} + +// RetentionPolicy indicates if retention is enabled and for how many days +type RetentionPolicy struct { + Enabled bool + Days *int +} + +// Metrics provide request statistics. 
+type Metrics struct { + Version string + Enabled bool + IncludeAPIs *bool + RetentionPolicy *RetentionPolicy +} + +// Cors includes all the CORS rules +type Cors struct { + CorsRule []CorsRule +} + +// CorsRule includes all settings for a Cors rule +type CorsRule struct { + AllowedOrigins string + AllowedMethods string + MaxAgeInSeconds int + ExposedHeaders string + AllowedHeaders string +} + +func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + headers := c.getStandardHeaders() + + resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var out ServiceProperties + err = xmlUnmarshal(resp.body, &out) + if err != nil { + return nil, err + } + + return &out, nil +} + +func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + + // Ideally, StorageServiceProperties would be the output struct + // This is to avoid golint stuttering, while generating the correct XML + type StorageServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + } + input := StorageServiceProperties{ + Logging: props.Logging, + HourMetrics: props.HourMetrics, + MinuteMetrics: props.MinuteMetrics, + Cors: props.Cors, + } + + body, length, err := xmlMarshal(input) + if err != nil { + return err + } + + headers := c.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := c.exec(http.MethodPut, uri, headers, body, auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return 
checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice_test.go new file mode 100644 index 000000000000..713aeb8a2792 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice_test.go @@ -0,0 +1,85 @@ +package storage + +import chk "gopkg.in/check.v1" + +type StorageSuite struct{} + +var _ = chk.Suite(&StorageSuite{}) + +// This tests use the Table service, but could also use any other service + +func (s *StorageSuite) TestGetServiceProperties(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + sp, err := cli.GetServiceProperties() + c.Assert(err, chk.IsNil) + c.Assert(sp, chk.NotNil) +} + +func (s *StorageSuite) TestSetServiceProperties(c *chk.C) { + cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + + t := true + num := 7 + rp := RetentionPolicy{ + Enabled: true, + Days: &num, + } + m := Metrics{ + Version: "1.0", + Enabled: true, + IncludeAPIs: &t, + RetentionPolicy: &rp, + } + spInput := ServiceProperties{ + Logging: &Logging{ + Version: "1.0", + Delete: true, + Read: false, + Write: true, + RetentionPolicy: &rp, + }, + HourMetrics: &m, + MinuteMetrics: &m, + Cors: &Cors{ + CorsRule: []CorsRule{ + { + AllowedOrigins: "*", + AllowedMethods: "GET,PUT", + MaxAgeInSeconds: 500, + ExposedHeaders: "x-ms-meta-customheader,x-ms-meta-data*", + AllowedHeaders: "x-ms-meta-customheader,x-ms-meta-target*", + }, + }, + }, + } + + err := cli.SetServiceProperties(spInput) + c.Assert(err, chk.IsNil) + + spOutput, err := cli.GetServiceProperties() + c.Assert(err, chk.IsNil) + c.Assert(spOutput, chk.NotNil) + c.Assert(*spOutput, chk.DeepEquals, spInput) + + rec.Stop() + + // Back to defaults + defaultRP := RetentionPolicy{ + Enabled: false, + Days: nil, + } + m.Enabled = false + m.IncludeAPIs = nil + m.RetentionPolicy = &defaultRP + 
spInput.Logging.Delete = false + spInput.Logging.Read = false + spInput.Logging.Write = false + spInput.Logging.RetentionPolicy = &defaultRP + spInput.Cors = &Cors{nil} + + cli.SetServiceProperties(spInput) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go index 39e997503552..4eae3af9df76 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -4,126 +4,409 @@ import ( "bytes" "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "net/url" + "strconv" + "strings" + "time" ) -// TableServiceClient contains operations for Microsoft Azure Table Storage -// Service. -type TableServiceClient struct { - client Client +const ( + tablesURIPath = "/Tables" + nextTableQueryParameter = "NextTableName" + headerNextPartitionKey = "x-ms-continuation-NextPartitionKey" + headerNextRowKey = "x-ms-continuation-NextRowKey" + nextPartitionKeyQueryParameter = "NextPartitionKey" + nextRowKeyQueryParameter = "NextRowKey" +) + +// TableAccessPolicy are used for SETTING table policies +type TableAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAppend bool + CanUpdate bool + CanDelete bool } -// AzureTable is the typedef of the Azure Table name -type AzureTable string +// Table represents an Azure table. +type Table struct { + tsc *TableServiceClient + Name string `json:"TableName"` + OdataEditLink string `json:"odata.editLink"` + OdataID string `json:"odata.id"` + OdataMetadata string `json:"odata.metadata"` + OdataType string `json:"odata.type"` +} -const ( - tablesURIPath = "/Tables" -) +// EntityQueryResult contains the response from +// ExecuteQuery and ExecuteQueryNextResults functions. 
+type EntityQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Entities []*Entity `json:"value"` + QueryNextLink + table *Table +} -type createTableRequest struct { - TableName string `json:"TableName"` +type continuationToken struct { + NextPartitionKey string + NextRowKey string } -func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } +func (t *Table) buildPath() string { + return fmt.Sprintf("/%s", t.Name) +} -func (c *TableServiceClient) getStandardHeaders() map[string]string { - return map[string]string{ - "x-ms-version": "2015-02-21", - "x-ms-date": currentTimeRfc1123Formatted(), - "Accept": "application/json;odata=nometadata", - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - } +func (t *Table) buildSpecificPath() string { + return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name) } -// QueryTables returns the tables created in the -// *TableServiceClient storage account. -func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// Get gets the referenced table. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (t *Table) Get(timeout uint, ml MetadataLevel) error { + if ml == EmptyPayload { + return errEmptyPayload + } - headers := c.getStandardHeaders() - headers["Content-Length"] = "0" + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := t.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) - resp, err := c.client.execTable("GET", uri, headers, nil) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query) + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { - return nil, err + return err } - defer resp.body.Close() + defer readAndCloseBody(resp.body) - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, err + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err } + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, t) + if err != nil { + return err + } + return nil +} + +// Create creates the referenced table. +// This function fails if the name is not compliant +// with the specification or the tables already exists. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. 
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table +func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + }) + + type createTableRequest struct { + TableName string `json:"TableName"` + } + req := createTableRequest{TableName: t.Name} buf := new(bytes.Buffer) - buf.ReadFrom(resp.body) + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } - var respArray queryTablesResponse - if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { - return nil, err + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, ml) + headers = addBodyRelatedHeaders(headers, buf.Len()) + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) + + if ml == EmptyPayload { + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + } else { + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } } - s := make([]AzureTable, len(respArray.TableName)) - for i, elem := range respArray.TableName { - s[i] = AzureTable(elem.TableName) + if ml != EmptyPayload { + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return err + } + err = json.Unmarshal(data, t) + if err != nil { + return err + } } - return s, nil + return nil } -// CreateTable creates the table given the specific -// name. This function fails if the name is not compliant -// with the specification or the tables already exists. -func (c *TableServiceClient) CreateTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) +// Delete deletes the referenced table. 
+// This function fails if the table is not present. +// Be advised: Delete deletes all the entries that may be present. +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table +func (t *Table) Delete(timeout uint, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ + "timeout": {strconv.Itoa(int(timeout))}, + }) - headers := c.getStandardHeaders() + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, EmptyPayload) + headers = options.addToHeaders(headers) - req := createTableRequest{TableName: string(table)} - buf := new(bytes.Buffer) + resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) + if err != nil { + return err + } + defer readAndCloseBody(resp.body) - if err := json.NewEncoder(buf).Encode(req); err != nil { + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { return err + } + return nil +} + +// QueryOptions includes options for a query entities operation. +// Top, filter and select are OData query options. +type QueryOptions struct { + Top uint + Filter string + Select []string + RequestID string +} - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) +func (options *QueryOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + if len(options.Select) > 0 { + query.Add(OdataSelect, strings.Join(options.Select, ",")) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} - resp, err := c.client.execTable("POST", uri, headers, buf) +// QueryEntities returns the entities in the table. +// You can use query options defined by the OData Protocol specification. 
+// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + query, headers := options.getParameters() + query = addTimeout(query, timeout) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) + return t.queryEntities(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryEntities or NextResults operation. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { + if eqr == nil { + return nil, errNilPreviousResult + } + if eqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) +} + +// SetPermissions sets up table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL +func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + body, length, err := generateTableACLPayload(tap) if err != nil { return err } - defer resp.body.Close() + headers["Content-Length"] = strconv.Itoa(length) - if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) + if err != nil { return err } + defer readAndCloseBody(resp.body) + if err 
:= checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } return nil } -// DeleteTable deletes the table given the specific -// name. This function fails if the table is not present. -// Be advised: DeleteTable deletes all the entries -// that may be present. -func (c *TableServiceClient) DeleteTable(table AzureTable) error { - uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) - uri += fmt.Sprintf("('%s')", string(table)) +func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, tap := range policies { + permission := generateTablePermissions(&tap) + signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +// GetPermissions gets the table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl +func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() - headers := c.getStandardHeaders() + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } - headers["Content-Length"] = "0" + var ap AccessPolicy + err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return updateTableAccessPolicy(ap), nil +} - resp, err := c.client.execTable("DELETE", uri, headers, nil) +func (t *Table) 
queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { + headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) + if ml != EmptyPayload { + headers[headerAccept] = string(ml) + } + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) if err != nil { - return err + return nil, err } defer resp.body.Close() - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + data, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var entities EntityQueryResult + err = json.Unmarshal(data, &entities) + if err != nil { + return nil, err + } + + for i := range entities.Entities { + entities.Entities[i].Table = t + } + entities.table = t + + contToken := extractContinuationTokenFromHeaders(resp.headers) + if contToken == nil { + entities.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) + entities.NextLink = &newURI + entities.ml = ml + } + + return &entities, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { + ct := continuationToken{ + NextPartitionKey: h.Get(headerNextPartitionKey), + NextRowKey: h.Get(headerNextRowKey), + } + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct } return nil } + +func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { + taps := []TableAccessPolicy{} + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + tap := TableAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + 
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") + tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + taps = append(taps, tap) + } + return taps +} + +func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { + // generate the permissions string (raud). + // still want the end user API to have bool flags. + permissions = "" + + if tap.CanRead { + permissions += "r" + } + + if tap.CanAppend { + permissions += "a" + } + + if tap.CanUpdate { + permissions += "u" + } + + if tap.CanDelete { + permissions += "d" + } + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go new file mode 100644 index 000000000000..7a0f0915c627 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -0,0 +1,302 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "sort" + "strings" + + "github.com/satori/uuid" +) + +// Operation type. Insert, Delete, Replace etc. +type Operation int + +// consts for batch operations. +const ( + InsertOp = Operation(1) + DeleteOp = Operation(2) + ReplaceOp = Operation(3) + MergeOp = Operation(4) + InsertOrReplaceOp = Operation(5) + InsertOrMergeOp = Operation(6) +) + +// BatchEntity used for tracking Entities to operate on and +// whether operations (replace/merge etc) should be forced. +// Wrapper for regular Entity with additional data specific for the entity. +type BatchEntity struct { + *Entity + Force bool + Op Operation +} + +// TableBatch stores all the entities that will be operated on during a batch process. +// Entities can be inserted, replaced or deleted. 
+type TableBatch struct { + BatchEntitySlice []BatchEntity + + // reference to table we're operating on. + Table *Table +} + +// defaultChangesetHeaders for changeSets +var defaultChangesetHeaders = map[string]string{ + "Accept": "application/json;odata=minimalmetadata", + "Content-Type": "application/json", + "Prefer": "return-no-content", +} + +// NewBatch return new TableBatch for populating. +func (t *Table) NewBatch() *TableBatch { + return &TableBatch{ + Table: t, + } +} + +// InsertEntity adds an entity in preparation for a batch insert. +func (t *TableBatch) InsertEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: InsertOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace. +func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: false, Op: InsertOrReplaceOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag +func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) { + t.InsertOrReplaceEntity(entity, true) +} + +// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge. +func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: false, Op: InsertOrMergeOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag +func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) { + t.InsertOrMergeEntity(entity, true) +} + +// ReplaceEntity adds an entity in preparation for a batch replace. 
+func (t *TableBatch) ReplaceEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// DeleteEntity adds an entity in preparation for a batch delete +func (t *TableBatch) DeleteEntity(entity *Entity, force bool) { + be := BatchEntity{Entity: entity, Force: false, Op: DeleteOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag +func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) { + t.DeleteEntity(entity, true) +} + +// MergeEntity adds an entity in preparation for a batch merge +func (t *TableBatch) MergeEntity(entity *Entity) { + be := BatchEntity{Entity: entity, Force: false, Op: MergeOp} + t.BatchEntitySlice = append(t.BatchEntitySlice, be) +} + +// ExecuteBatch executes many table operations in one request to Azure. +// The operations can be combinations of Insert, Delete, Replace and Merge +// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses +// the changesets. 
+// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions +func (t *TableBatch) ExecuteBatch() error { + changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1()) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil) + changesetBody, err := t.generateChangesetBody(changesetBoundary) + if err != nil { + return err + } + + boundary := fmt.Sprintf("batch_%s", uuid.NewV1()) + body, err := generateBody(changesetBody, changesetBoundary, boundary) + if err != nil { + return err + } + + headers := t.Table.tsc.client.getStandardHeaders() + headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary) + + resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth) + if err != nil { + return err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil { + + // check which batch failed. + operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value) + requestID, date, version := getDebugHeaders(resp.headers) + return AzureStorageServiceError{ + StatusCode: resp.statusCode, + Code: resp.odata.Err.Code, + RequestID: requestID, + Date: date, + APIVersion: version, + Message: operationFailedMessage, + } + } + + return nil +} + +// getFailedOperation parses the original Azure error string and determines which operation failed +// and generates appropriate message. +func (t *TableBatch) getFailedOperation(errorMessage string) string { + // errorMessage consists of "number:string" we just need the number. 
+ sp := strings.Split(errorMessage, ":") + if len(sp) > 1 { + msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage) + return msg + } + + // cant parse the message, just return the original message to client + return errorMessage +} + +// generateBody generates the complete body for the batch request. +func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) { + + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + writer.SetBoundary(boundary) + h := make(textproto.MIMEHeader) + h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary)) + batchWriter, err := writer.CreatePart(h) + if err != nil { + return nil, err + } + batchWriter.Write(changeSetBody.Bytes()) + writer.Close() + return body, nil +} + +// generateChangesetBody generates the individual changesets for the various operations within the batch request. +// There is a changeset for Insert, Delete, Merge etc. +func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) { + + body := new(bytes.Buffer) + writer := multipart.NewWriter(body) + writer.SetBoundary(changesetBoundary) + + for _, be := range t.BatchEntitySlice { + t.generateEntitySubset(&be, writer) + } + + writer.Close() + return body, nil +} + +// generateVerb generates the HTTP request VERB required for each changeset. 
+func generateVerb(op Operation) (string, error) { + switch op { + case InsertOp: + return http.MethodPost, nil + case DeleteOp: + return http.MethodDelete, nil + case ReplaceOp, InsertOrReplaceOp: + return http.MethodPut, nil + case MergeOp, InsertOrMergeOp: + return "MERGE", nil + default: + return "", errors.New("Unable to detect operation") + } +} + +// generateQueryPath generates the query path for within the changesets +// For inserts it will just be a table query path (table name) +// but for other operations (modifying an existing entity) then +// the partition/row keys need to be generated. +func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { + if op == InsertOp { + return entity.Table.buildPath() + } + return entity.buildPath() +} + +// generateGenericOperationHeaders generates common headers for a given operation. +func generateGenericOperationHeaders(be *BatchEntity) map[string]string { + retval := map[string]string{} + + for k, v := range defaultChangesetHeaders { + retval[k] = v + } + + if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { + if be.Force || be.Entity.OdataEtag == "" { + retval["If-Match"] = "*" + } else { + retval["If-Match"] = be.Entity.OdataEtag + } + } + + return retval +} + +// generateEntitySubset generates body payload for particular batch entity +func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { + + h := make(textproto.MIMEHeader) + h.Set(headerContentType, "application/http") + h.Set(headerContentTransferEncoding, "binary") + + verb, err := generateVerb(batchEntity.Op) + if err != nil { + return err + } + + genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) + queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) + + operationWriter, err := writer.CreatePart(h) + if err != nil { + return err + } + + urlAndVerb := fmt.Sprintf("%s %s 
HTTP/1.1\r\n", verb, uri) + operationWriter.Write([]byte(urlAndVerb)) + writeHeaders(genericOpHeadersMap, &operationWriter) + operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. + + // delete operation doesn't need a body. + if batchEntity.Op != DeleteOp { + //var e Entity = batchEntity.Entity + body, err := json.Marshal(batchEntity.Entity) + if err != nil { + return err + } + operationWriter.Write(body) + } + + return nil +} + +func writeHeaders(h map[string]string, writer *io.Writer) { + // This way it is guaranteed the headers will be written in a sorted order + var keys []string + for k := range h { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch_test.go new file mode 100644 index 000000000000..8b91efa0cf5a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch_test.go @@ -0,0 +1,216 @@ +package storage + +import ( + "time" + + "github.com/satori/uuid" + chk "gopkg.in/check.v1" +) + +type TableBatchSuite struct{} + +var _ = chk.Suite(&TableBatchSuite{}) + +func (s *TableBatchSuite) Test_BatchInsertMultipleEntities(c *chk.C) { + cli := getBasicClient(c).GetTableService() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c, "me")) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + 
entity.Properties = props + + entity2 := table.GetEntityReference("mypartitionkey", "myrowkey2") + props2 := map[string]interface{}{ + "AmountDue": 111.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity2.Properties = props2 + + batch := table.NewBatch() + batch.InsertOrReplaceEntity(entity, false) + batch.InsertOrReplaceEntity(entity2, false) + + err = batch.ExecuteBatch() + c.Assert(err, chk.IsNil) + + options := QueryOptions{ + Top: 2, + } + + results, err := table.QueryEntities(30, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 2) +} + +func (s *TableBatchSuite) Test_BatchInsertSameEntryMultipleTimes(c *chk.C) { + cli := getBasicClient(c).GetTableService() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + + batch := table.NewBatch() + batch.InsertOrReplaceEntity(entity, false) + batch.InsertOrReplaceEntity(entity, false) + + err = batch.ExecuteBatch() + c.Assert(err, chk.NotNil) + v, ok := err.(AzureStorageServiceError) + if ok { + c.Assert(v.Code, chk.Equals, "InvalidDuplicateRow") + } +} + +func (s *TableBatchSuite) Test_BatchInsertDeleteSameEntity(c *chk.C) { + cli := getBasicClient(c).GetTableService() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := 
table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + + batch := table.NewBatch() + batch.InsertOrReplaceEntity(entity, false) + batch.DeleteEntity(entity, true) + + err = batch.ExecuteBatch() + c.Assert(err, chk.NotNil) + + v, ok := err.(AzureStorageServiceError) + if ok { + c.Assert(v.Code, chk.Equals, "InvalidDuplicateRow") + } +} + +func (s *TableBatchSuite) Test_BatchInsertThenDeleteDifferentBatches(c *chk.C) { + cli := getBasicClient(c).GetTableService() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + + batch := table.NewBatch() + batch.InsertOrReplaceEntity(entity, false) + err = batch.ExecuteBatch() + c.Assert(err, chk.IsNil) + + options := QueryOptions{ + Top: 2, + } + + results, err := table.QueryEntities(30, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 1) + + batch = table.NewBatch() + batch.DeleteEntity(entity, true) + err = batch.ExecuteBatch() + c.Assert(err, chk.IsNil) + + // Timeout set to 15 for this test to work propwrly with the recordings + results, err = 
table.QueryEntities(15, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 0) +} + +func (s *TableBatchSuite) Test_BatchInsertThenMergeDifferentBatches(c *chk.C) { + cli := getBasicClient(c).GetTableService() + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + + entity := table.GetEntityReference("mypartitionkey", "myrowkey") + props := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "IsActive": true, + "NumberOfOrders": int64(255), + } + entity.Properties = props + + batch := table.NewBatch() + batch.InsertOrReplaceEntity(entity, false) + err = batch.ExecuteBatch() + c.Assert(err, chk.IsNil) + + entity2 := table.GetEntityReference("mypartitionkey", "myrowkey") + props2 := map[string]interface{}{ + "AmountDue": 200.23, + "CustomerCode": uuid.FromStringOrNil("c9da6455-213d-42c9-9a79-3e9149a57833"), + "CustomerSince": time.Date(1992, time.December, 20, 21, 55, 0, 0, time.UTC), + "DifferentField": 123, + "NumberOfOrders": int64(255), + } + entity2.Properties = props2 + + batch = table.NewBatch() + batch.InsertOrReplaceEntity(entity2, false) + err = batch.ExecuteBatch() + c.Assert(err, chk.IsNil) + + options := QueryOptions{ + Top: 2, + } + + results, err := table.QueryEntities(30, FullMetadata, &options) + c.Assert(err, chk.IsNil) + c.Assert(results.Entities, chk.HasLen, 1) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go deleted file mode 100644 index a26d9c6f581f..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go +++ /dev/null @@ -1,357 +0,0 @@ -package storage - -import ( - 
"bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "reflect" -) - -// Annotating as secure for gas scanning -/* #nosec */ -const ( - partitionKeyNode = "PartitionKey" - rowKeyNode = "RowKey" - tag = "table" - tagIgnore = "-" - continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" - continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" - maxTopParameter = 1000 -) - -type queryTablesResponse struct { - TableName []struct { - TableName string `json:"TableName"` - } `json:"value"` -} - -const ( - tableOperationTypeInsert = iota - tableOperationTypeUpdate = iota - tableOperationTypeMerge = iota - tableOperationTypeInsertOrReplace = iota - tableOperationTypeInsertOrMerge = iota -) - -type tableOperation int - -// TableEntity interface specifies -// the functions needed to support -// marshaling and unmarshaling into -// Azure Tables. The struct must only contain -// simple types because Azure Tables do not -// support hierarchy. -type TableEntity interface { - PartitionKey() string - RowKey() string - SetPartitionKey(string) error - SetRowKey(string) error -} - -// ContinuationToken is an opaque (ie not useful to inspect) -// struct that Get... methods can return if there are more -// entries to be returned than the ones already -// returned. Just pass it to the same function to continue -// receiving the remaining entries. -type ContinuationToken struct { - NextPartitionKey string - NextRowKey string -} - -type getTableEntriesResponse struct { - Elements []map[string]interface{} `json:"value"` -} - -// QueryTableEntities queries the specified table and returns the unmarshaled -// entities of type retType. -// top parameter limits the returned entries up to top. Maximum top -// allowed by Azure API is 1000. In case there are more than top entries to be -// returned the function will return a non nil *ContinuationToken. 
You can call the -// same function again passing the received ContinuationToken as previousContToken -// parameter in order to get the following entries. The query parameter -// is the odata query. To retrieve all the entries pass the empty string. -// The function returns a pointer to a TableEntity slice, the *ContinuationToken -// if there are more entries to be returned and an error in case something went -// wrong. -// -// Example: -// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") -func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { - if top > maxTopParameter { - return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top) - } - - uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) - uri += fmt.Sprintf("?$top=%d", top) - if query != "" { - uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) - } - - if previousContToken != nil { - uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) - } - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - - resp, err := c.client.execTable("GET", uri, headers, nil) - - if err != nil { - return nil, nil, err - } - - contToken := extractContinuationTokenFromHeaders(resp.headers) - - if err != nil { - return nil, contToken, err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { - return nil, contToken, err - } - - retEntries, err := deserializeEntity(retType, resp.body) - if err != nil { - return nil, contToken, err - } - - return retEntries, contToken, nil -} - -// InsertEntity inserts an entity in the specified table. 
-// The function fails if there is an entity with the same -// PartitionKey and RowKey in the table. -func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, false, "POST"); err != nil { - return checkRespCode(sc, []int{http.StatusCreated}) - } - - return err -} - -func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - if specifyKeysInURL { - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - } - - headers := c.getStandardHeaders() - - var buf bytes.Buffer - - if err := injectPartitionAndRowKeys(entity, &buf); err != nil { - return 0, err - } - - headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) - - var err error - var resp *odataResponse - - resp, err = c.client.execTable(method, uri, headers, &buf) - - if err != nil { - return 0, err - } - - defer resp.body.Close() - - return resp.statusCode, nil -} - -// UpdateEntity updates the contents of an entity with the -// one passed as parameter. The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// MergeEntity merges the contents of an entity with the -// one passed as parameter. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. 
-func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// DeleteEntityWithoutCheck deletes the entity matching by -// PartitionKey and RowKey. There is no check on IfMatch -// parameter so the entity is always deleted. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table. -func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { - return c.DeleteEntity(table, entity, "*") -} - -// DeleteEntity deletes the entity matching by -// PartitionKey, RowKey and ifMatch field. -// The function fails if there is no entity -// with the same PartitionKey and RowKey in the table or -// the ifMatch is different. -func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { - uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) - uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) - - headers := c.getStandardHeaders() - - headers["Content-Length"] = "0" - headers["If-Match"] = ifMatch - - resp, err := c.client.execTable("DELETE", uri, headers, nil) - - if err != nil { - return err - } - defer resp.body.Close() - - if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { - return err - } - - return nil -} - -// InsertOrReplaceEntity inserts an entity in the specified table -// or replaced the existing one. 
-func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -// InsertOrMergeEntity inserts an entity in the specified table -// or merges the existing one. -func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { - var err error - - if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { - return checkRespCode(sc, []int{http.StatusNoContent}) - } - return err -} - -func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { - if err := json.NewEncoder(buf).Encode(entity); err != nil { - return err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return err - } - - // Inject PartitionKey and RowKey - dec[partitionKeyNode] = entity.PartitionKey() - dec[rowKeyNode] = entity.RowKey() - - // Remove tagged fields - // The tag is defined in the const section - // This is useful to avoid storing the PartitionKey and RowKey twice. 
- numFields := reflect.ValueOf(entity).Elem().NumField() - for i := 0; i < numFields; i++ { - f := reflect.ValueOf(entity).Elem().Type().Field(i) - - if f.Tag.Get(tag) == tagIgnore { - // we must look for its JSON name in the dictionary - // as the user can rename it using a tag - jsonName := f.Name - if f.Tag.Get("json") != "" { - jsonName = f.Tag.Get("json") - } - delete(dec, jsonName) - } - } - - buf.Reset() - - if err := json.NewEncoder(buf).Encode(&dec); err != nil { - return err - } - - return nil -} - -func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { - buf := new(bytes.Buffer) - - var ret getTableEntriesResponse - if err := json.NewDecoder(reader).Decode(&ret); err != nil { - return nil, err - } - - tEntries := make([]TableEntity, len(ret.Elements)) - - for i, entry := range ret.Elements { - - buf.Reset() - if err := json.NewEncoder(buf).Encode(entry); err != nil { - return nil, err - } - - dec := make(map[string]interface{}) - if err := json.NewDecoder(buf).Decode(&dec); err != nil { - return nil, err - } - - var pKey, rKey string - // strip pk and rk - for key, val := range dec { - switch key { - case partitionKeyNode: - pKey = val.(string) - case rowKeyNode: - rKey = val.(string) - } - } - - delete(dec, partitionKeyNode) - delete(dec, rowKeyNode) - - buf.Reset() - if err := json.NewEncoder(buf).Encode(dec); err != nil { - return nil, err - } - - // Create a empty retType instance - tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) - // Popolate it with the values - if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { - return nil, err - } - - // Reset PartitionKey and RowKey - tEntries[i].SetPartitionKey(pKey) - tEntries[i].SetRowKey(rKey) - } - - return tEntries, nil -} - -func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { - ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} - - if ct.NextPartitionKey != "" 
&& ct.NextRowKey != "" { - return &ct - } - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go index 307e14a3924d..fd17223dbf40 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go @@ -1,287 +1,209 @@ package storage import ( - "crypto/rand" - "encoding/base64" - "fmt" - "reflect" + "strconv" + "time" chk "gopkg.in/check.v1" ) -type TableClient struct{} +type StorageTableSuite struct{} + +var _ = chk.Suite(&StorageTableSuite{}) func getTableClient(c *chk.C) TableServiceClient { return getBasicClient(c).GetTableService() } -type CustomEntity struct { - Name string `json:"name"` - Surname string `json:"surname"` - Number int - PKey string `json:"pk" table:"-"` - RKey string `json:"rk" table:"-"` -} - -type CustomEntityExtended struct { - *CustomEntity - ExtraField string -} - -func (c *CustomEntity) PartitionKey() string { - return c.PKey -} - -func (c *CustomEntity) RowKey() string { - return c.RKey -} - -func (c *CustomEntity) SetPartitionKey(s string) error { - c.PKey = s - return nil -} - -func (c *CustomEntity) SetRowKey(s string) error { - c.RKey = s - return nil -} - -func (s *StorageBlobSuite) Test_SharedKeyLite(c *chk.C) { - cli := getTableClient(c) - - // override the accountKey and accountName - // but make sure to reset when returning - oldAK := cli.client.accountKey - oldAN := cli.client.accountName - - defer func() { - cli.client.accountKey = oldAK - cli.client.accountName = oldAN - }() - - // don't worry, I've already changed mine :) - key, err := base64.StdEncoding.DecodeString("zHDHGs7C+Di9pZSDMuarxJJz3xRBzAHBYaobxpLEc7kwTptR/hPEa9j93hIfb2Tbe9IA50MViGmjQ6nUF/OVvA==") - if err != nil { - c.Fail() +func (cli *TableServiceClient) deleteAllTables() { + if result, _ := cli.QueryTables(MinimalMetadata, nil); result != nil { + for _, t := range result.Tables { + 
t.Delete(30, nil) + } } - - cli.client.accountKey = key - cli.client.accountName = "mindgotest" - - headers := map[string]string{ - "Accept-Charset": "UTF-8", - "Content-Type": "application/json", - "x-ms-date": "Wed, 23 Sep 2015 16:40:05 GMT", - "Content-Length": "0", - "x-ms-version": "2015-02-21", - "Accept": "application/json;odata=nometadata", - } - url := "https://mindgotest.table.core.windows.net/tquery()" - - ret, err := cli.client.createSharedKeyLite(url, headers) - if err != nil { - c.Fail() - } - - c.Assert(ret, chk.Equals, "SharedKeyLite mindgotest:+32DTgsPUgXPo/O7RYaTs0DllA6FTXMj3uK4Qst8y/E=") } -func (s *StorageBlobSuite) Test_CreateAndDeleteTable(c *chk.C) { +func (s *StorageTableSuite) Test_CreateAndDeleteTable(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) - - err := cli.CreateTable(tn) + table1 := cli.GetTableReference(tableName(c, "1")) + err := table1.Create(30, EmptyPayload, nil) c.Assert(err, chk.IsNil) - err = cli.DeleteTable(tn) + // update table metadata + table2 := cli.GetTableReference(tableName(c, "2")) + err = table2.Create(30, FullMetadata, nil) + defer table2.Delete(30, nil) c.Assert(err, chk.IsNil) -} - -func (s *StorageBlobSuite) Test_InsertEntities(c *chk.C) { - cli := getTableClient(c) - tn := AzureTable(randTable()) + // Check not empty values + c.Assert(table2.OdataEditLink, chk.Not(chk.Equals), "") + c.Assert(table2.OdataID, chk.Not(chk.Equals), "") + c.Assert(table2.OdataMetadata, chk.Not(chk.Equals), "") + c.Assert(table2.OdataType, chk.Not(chk.Equals), "") - err := cli.CreateTable(tn) + err = table1.Delete(30, nil) c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) - - ce := &CustomEntity{Name: "Luke", Surname: "Skywalker", Number: 1543, PKey: "pkey"} - - for i := 0; i < 12; i++ { - ce.SetRowKey(fmt.Sprintf("%d", i)) - - err = cli.InsertEntity(tn, ce) - c.Assert(err, chk.IsNil) - } } -func (s *StorageBlobSuite) Test_InsertOrReplaceEntities(c 
*chk.C) { +func (s *StorageTableSuite) Test_CreateTableWithAllResponsePayloadLevels(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) - - err := cli.CreateTable(tn) - c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) - - ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} - - err = cli.InsertOrReplaceEntity(tn, ce) - c.Assert(err, chk.IsNil) - - cextra := &CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} - err = cli.InsertOrReplaceEntity(tn, cextra) - c.Assert(err, chk.IsNil) + createAndDeleteTable(cli, EmptyPayload, c, "empty") + createAndDeleteTable(cli, NoMetadata, c, "nm") + createAndDeleteTable(cli, MinimalMetadata, c, "minimal") + createAndDeleteTable(cli, FullMetadata, c, "full") } -func (s *StorageBlobSuite) Test_InsertOrMergeEntities(c *chk.C) { +func (s *StorageTableSuite) TestGet(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) - - err := cli.CreateTable(tn) + tn := tableName(c) + table := cli.GetTableReference(tn) + err := table.Create(30, EmptyPayload, nil) c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) + defer table.Delete(30, nil) - ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} - - err = cli.InsertOrMergeEntity(tn, ce) + err = table.Get(30, FullMetadata) c.Assert(err, chk.IsNil) + c.Assert(table.Name, chk.Equals, tn) + c.Assert(table.OdataEditLink, chk.Not(chk.Equals), "") + c.Assert(table.OdataID, chk.Not(chk.Equals), "") + c.Assert(table.OdataMetadata, chk.Not(chk.Equals), "") + c.Assert(table.OdataType, chk.Not(chk.Equals), "") +} - cextra := &CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} - err = cli.InsertOrReplaceEntity(tn, cextra) - c.Assert(err, chk.IsNil) +func createAndDeleteTable(cli TableServiceClient, ml MetadataLevel, c *chk.C, extra string) { + 
table := cli.GetTableReference(tableName(c, extra)) + c.Assert(table.Create(30, ml, nil), chk.IsNil) + c.Assert(table.Delete(30, nil), chk.IsNil) } -func (s *StorageBlobSuite) Test_InsertAndGetEntities(c *chk.C) { +func (s *StorageTableSuite) TestQueryTablesNextResults(c *chk.C) { cli := getTableClient(c) + cli.deleteAllTables() + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) + for i := 0; i < 3; i++ { + table := cli.GetTableReference(tableName(c, strconv.Itoa(i))) + err := table.Create(30, EmptyPayload, nil) + c.Assert(err, chk.IsNil) + defer table.Delete(30, nil) + } - err := cli.CreateTable(tn) + options := QueryTablesOptions{ + Top: 2, + } + result, err := cli.QueryTables(MinimalMetadata, &options) c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) + c.Assert(result.Tables, chk.HasLen, 2) + c.Assert(result.NextLink, chk.NotNil) - ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - ce.SetRowKey("200") - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") + result, err = result.NextResults(nil) c.Assert(err, chk.IsNil) + c.Assert(result.Tables, chk.HasLen, 1) + c.Assert(result.NextLink, chk.IsNil) - c.Assert(len(entries), chk.Equals, 2) + result, err = result.NextResults(nil) + c.Assert(result, chk.IsNil) + c.Assert(err, chk.NotNil) +} - c.Assert(ce.RowKey(), chk.Equals, entries[1].RowKey()) +func appendTablePermission(policies []TableAccessPolicy, ID string, + canRead bool, canAppend bool, canUpdate bool, canDelete bool, + startTime time.Time, expiryTime time.Time) []TableAccessPolicy { - c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ce) + tap := TableAccessPolicy{ + ID: ID, + StartTime: startTime, + ExpiryTime: expiryTime, + CanRead: canRead, + CanAppend: canAppend, + CanUpdate: canUpdate, + CanDelete: canDelete, + } + 
policies = append(policies, tap) + return policies } -func (s *StorageBlobSuite) Test_InsertAndQueryEntities(c *chk.C) { +func (s *StorageTableSuite) TestSetPermissionsSuccessfully(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) + table := cli.GetTableReference(tableName(c)) + c.Assert(table.Create(30, EmptyPayload, nil), chk.IsNil) + defer table.Delete(30, nil) - err := cli.CreateTable(tn) - c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) + policies := []TableAccessPolicy{} + policies = appendTablePermission(policies, "GolangRocksOnAzure", true, true, true, true, fixedTime, fixedTime.Add(10*time.Hour)) - ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - ce.SetRowKey("200") - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "RowKey eq '200'") + err := table.SetPermissions(policies, 30, nil) c.Assert(err, chk.IsNil) - - c.Assert(len(entries), chk.Equals, 1) - - c.Assert(ce.RowKey(), chk.Equals, entries[0].RowKey()) } -func (s *StorageBlobSuite) Test_InsertAndDeleteEntities(c *chk.C) { +func (s *StorageTableSuite) TestSetPermissionsUnsuccessfully(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() - tn := AzureTable(randTable()) + table := cli.GetTableReference("nonexistingtable") - err := cli.CreateTable(tn) - c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) - - ce := &CustomEntity{Name: "Test", Surname: "Test2", Number: 0, PKey: "pkey", RKey: "r01"} - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - ce.Number = 1 - ce.SetRowKey("r02") - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - - entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "Number eq 1") - c.Assert(err, chk.IsNil) - - c.Assert(len(entries), 
chk.Equals, 1) - - c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ce) - - c.Assert(cli.DeleteEntityWithoutCheck(tn, entries[0]), chk.IsNil) - - entries, _, err = cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") - c.Assert(err, chk.IsNil) + policies := []TableAccessPolicy{} + policies = appendTablePermission(policies, "GolangRocksOnAzure", true, true, true, true, fixedTime, fixedTime.Add(10*time.Hour)) - // only 1 entry must be present - c.Assert(len(entries), chk.Equals, 1) + err := table.SetPermissions(policies, 30, nil) + c.Assert(err, chk.NotNil) } -func (s *StorageBlobSuite) Test_ContinuationToken(c *chk.C) { +func (s *StorageTableSuite) TestSetThenGetPermissionsSuccessfully(c *chk.C) { cli := getTableClient(c) + rec := cli.client.appendRecorder(c) + defer rec.Stop() + + table := cli.GetTableReference(tableName(c)) + c.Assert(table.Create(30, EmptyPayload, nil), chk.IsNil) + defer table.Delete(30, nil) - tn := AzureTable(randTable()) + policies := []TableAccessPolicy{} + policies = appendTablePermission(policies, "GolangRocksOnAzure", true, true, true, true, fixedTime, fixedTime.Add(10*time.Hour)) + policies = appendTablePermission(policies, "AutoRestIsSuperCool", true, true, false, true, fixedTime.Add(20*time.Hour), fixedTime.Add(30*time.Hour)) - err := cli.CreateTable(tn) + err := table.SetPermissions(policies, 30, nil) c.Assert(err, chk.IsNil) - defer cli.DeleteTable(tn) - var ce *CustomEntity - var ceList [5]*CustomEntity + newPolicies, err := table.GetPermissions(30, nil) + c.Assert(err, chk.IsNil) - for i := 0; i < 5; i++ { - ce = &CustomEntity{Name: "Test", Surname: "Test2", Number: i, PKey: "pkey", RKey: fmt.Sprintf("r%d", i)} - ceList[i] = ce - c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) - } + // fixedTime check policy set. + c.Assert(newPolicies, chk.HasLen, 2) - // retrieve using top = 2. 
Should return 2 entries, 2 entries and finally - // 1 entry - entries, contToken, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 2, "") - c.Assert(err, chk.IsNil) - c.Assert(len(entries), chk.Equals, 2) - c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[0]) - c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[1]) - c.Assert(contToken, chk.NotNil) + for i := range newPolicies { + c.Assert(newPolicies[i].ID, chk.Equals, policies[i].ID) - entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") - c.Assert(err, chk.IsNil) - c.Assert(len(entries), chk.Equals, 2) - c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[2]) - c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[3]) - c.Assert(contToken, chk.NotNil) + // test timestamps down the second + // rounding start/expiry time original perms since the returned perms would have been rounded. + // so need rounded vs rounded. + c.Assert(newPolicies[i].StartTime.UTC().Round(time.Second).Format(time.RFC1123), + chk.Equals, policies[i].StartTime.UTC().Round(time.Second).Format(time.RFC1123)) + c.Assert(newPolicies[i].ExpiryTime.UTC().Round(time.Second).Format(time.RFC1123), + chk.Equals, policies[i].ExpiryTime.UTC().Round(time.Second).Format(time.RFC1123)) - entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") - c.Assert(err, chk.IsNil) - c.Assert(len(entries), chk.Equals, 1) - c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[4]) - c.Assert(contToken, chk.IsNil) + c.Assert(newPolicies[i].CanRead, chk.Equals, policies[i].CanRead) + c.Assert(newPolicies[i].CanAppend, chk.Equals, policies[i].CanAppend) + c.Assert(newPolicies[i].CanUpdate, chk.Equals, policies[i].CanUpdate) + c.Assert(newPolicies[i].CanDelete, chk.Equals, policies[i].CanDelete) + } } -func randTable() string { - const alphanum = "abcdefghijklmnopqrstuvwxyz" - var bytes = make([]byte, 32) - rand.Read(bytes) - for i, b := range bytes { 
- bytes[i] = alphanum[b%byte(len(alphanum))] - } - return string(bytes) +func tableName(c *chk.C, extras ...string) string { + // 32 is the max len for table names + return nameGenerator(32, "table", alpha, c, extras) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go new file mode 100644 index 000000000000..895dcfded891 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -0,0 +1,190 @@ +package storage + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +const ( + headerAccept = "Accept" + headerEtag = "Etag" + headerPrefer = "Prefer" + headerXmsContinuation = "x-ms-Continuation-NextTableName" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client + auth authentication +} + +// TableOptions includes options for some table operations +type TableOptions struct { + RequestID string +} + +func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { + if options != nil { + h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) + } + return h +} + +// QueryNextLink includes information for getting the next page of +// results in query operations +type QueryNextLink struct { + NextLink *string + ml MetadataLevel +} + +// GetServiceProperties gets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties +func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return t.client.getServiceProperties(tableServiceName, t.auth) +} + +// SetServiceProperties sets the properties of your storage account's table service. 
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties +func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return t.client.setServiceProperties(props, tableServiceName, t.auth) +} + +// GetTableReference returns a Table object for the specified table name. +func (t *TableServiceClient) GetTableReference(name string) *Table { + return &Table{ + tsc: t, + Name: name, + } +} + +// QueryTablesOptions includes options for some table operations +type QueryTablesOptions struct { + Top uint + Filter string + RequestID string +} + +func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryTables returns the tables in the storage account. +// You can use query options defined by the OData Protocol specification. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables +func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { + query, headers := options.getParameters() + uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) + return t.queryTables(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryTables or a NextResults operation. 
+// +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { + if tqr == nil { + return nil, errNilPreviousResult + } + if tqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + + return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) +} + +// TableQueryResult contains the response from +// QueryTables and QueryTablesNextResults functions. +type TableQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Tables []Table `json:"value"` + QueryNextLink + tsc *TableServiceClient +} + +func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + headers = mergeHeaders(headers, t.client.getStandardHeaders()) + headers[headerAccept] = string(ml) + + resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, err + } + var out TableQueryResult + err = json.Unmarshal(respBody, &out) + if err != nil { + return nil, err + } + + for i := range out.Tables { + out.Tables[i].tsc = t + } + out.tsc = t + + nextLink := resp.headers.Get(http.CanonicalHeaderKey(headerXmsContinuation)) + if nextLink == "" { + out.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextTableQueryParameter, nextLink) + newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) + out.NextLink = &newURI + out.ml = ml + } + + return &out, 
nil +} + +func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { + h[headerContentType] = "application/json" + h[headerContentLength] = fmt.Sprintf("%v", length) + h[headerAcceptCharset] = "UTF-8" + return h +} + +func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { + if ml != EmptyPayload { + h[headerPrefer] = "return-content" + h[headerAccept] = string(ml) + } else { + h[headerPrefer] = "return-no-content" + // From API version 2015-12-11 onwards, Accept header is required + h[headerAccept] = string(NoMetadata) + } + return h +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 57ca1b6d937e..8a902be2f05b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -12,9 +12,15 @@ import ( "net/http" "net/url" "reflect" + "strconv" + "strings" "time" ) +var ( + fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6)) +) + func (c Client) computeHmac256(message string) string { h := hmac.New(sha256.New, c.accountKey) h.Write([]byte(message)) @@ -76,10 +82,118 @@ func headersFromStruct(v interface{}) map[string]string { value := reflect.ValueOf(v) for i := 0; i < value.NumField(); i++ { key := value.Type().Field(i).Tag.Get("header") - val := value.Field(i).String() - if key != "" && val != "" { - headers[key] = val + if key != "" { + reflectedValue := reflect.Indirect(value.Field(i)) + var val string + if reflectedValue.IsValid() { + switch reflectedValue.Type() { + case reflect.TypeOf(fixedTime): + val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time)) + case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)): + val = strconv.FormatUint(reflectedValue.Uint(), 10) + case reflect.TypeOf(int(0)): + val = strconv.FormatInt(reflectedValue.Int(), 10) + default: + val = reflectedValue.String() + } + } + if val != 
"" { + headers[key] = val + } } } return headers } + +// merges extraHeaders into headers and returns headers +func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { + for k, v := range extraHeaders { + headers[k] = v + } + return headers +} + +func addToHeaders(h map[string]string, key, value string) map[string]string { + if value != "" { + h[key] = value + } + return h +} + +func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string { + if value != nil { + h = addToHeaders(h, key, timeRfc1123Formatted(*value)) + } + return h +} + +func addTimeout(params url.Values, timeout uint) url.Values { + if timeout > 0 { + params.Add("timeout", fmt.Sprintf("%v", timeout)) + } + return params +} + +func addSnapshot(params url.Values, snapshot *time.Time) url.Values { + if snapshot != nil { + params.Add("snapshot", timeRfc1123Formatted(*snapshot)) + } + return params +} + +func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { + var out time.Time + var err error + outStr := h.Get(key) + if outStr != "" { + out, err = time.Parse(time.RFC1123, outStr) + if err != nil { + return nil, err + } + } + return &out, nil +} + +// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling +type TimeRFC1123 time.Time + +// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. +func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var value string + d.DecodeElement(&value, &start) + parse, err := time.Parse(time.RFC1123, value) + if err != nil { + return err + } + *t = TimeRFC1123(parse) + return nil +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. 
"_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go index a1817d162428..ec57f5056cd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go @@ -1,15 +1,83 @@ package storage import ( + "bytes" + "encoding/hex" "encoding/xml" + "fmt" "io/ioutil" "net/url" + "os" + "path/filepath" "strings" + "testing" "time" chk "gopkg.in/check.v1" ) +func TestMain(m *testing.M) { + exitStatus := m.Run() + err := fixRecordings() + if err != nil { + fmt.Fprintf(os.Stderr, "After test run, fixing recordings failed with error: %v\n", err) + exitStatus = 1 + } + os.Exit(exitStatus) +} + +func fixRecordings() error { + err := filepath.Walk(recordingsFolder, func(path string, file os.FileInfo, err error) error { + if strings.ToLower(filepath.Ext(path)) == ".yaml" { + recording, err := ioutil.ReadFile(path) + if err != nil { + fmt.Fprintf(os.Stderr, "Error reading file '%s': %v", path, err) + } + + fixedRecording := replaceStorageAccount(string(recording)) + + err = ioutil.WriteFile(path, []byte(fixedRecording), 0) + if err != nil { + fmt.Fprintf(os.Stderr, "Error writing file 
'%s': %v", path, err) + } + } + return err + }) + return err +} + +func replaceStorageAccount(recording string) string { + name := os.Getenv("ACCOUNT_NAME") + if name == "" { + // do nothing + return recording + } + + nameHex := getHex(name) + dummyHex := getHex(dummyStorageAccount) + + r := strings.NewReplacer(name, dummyStorageAccount, + nameHex, dummyHex) + + return r.Replace(string(recording)) +} + +func getHex(input string) string { + encoded := strings.ToUpper(hex.EncodeToString([]byte(input))) + formatted := bytes.Buffer{} + for i := 0; i < len(encoded); i += 2 { + formatted.WriteString(`\x`) + formatted.WriteString(encoded[i : i+2]) + } + return formatted.String() +} + +const ( + dummyStorageAccount = "golangrocksonazure" + dummyMiniStorageKey = "YmFy" + recordingsFolder = "recordings" +) + func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { now := time.Now().UTC() expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT" @@ -35,8 +103,8 @@ func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) { expected := `` c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected) - blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}} - expected = `foobar` + blocks := []Block{{"lol", BlockStatusLatest}, {"rofl", BlockStatusUncommitted}} + expected = `lolrofl` c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected) } @@ -70,14 +138,47 @@ func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) { func (s *StorageClientSuite) Test_headersFromStruct(c *chk.C) { type t struct { - header1 string `header:"HEADER1"` - header2 string `header:"HEADER2"` + Header1 string `header:"HEADER1"` + Header2 string `header:"HEADER2"` + TimePtr *time.Time `header:"ptr-time-header"` + TimeHeader time.Time `header:"time-header"` + UintPtr *uint `header:"ptr-uint-header"` + UintHeader uint `header:"uint-header"` + IntPtr *int `header:"ptr-int-header"` + IntHeader int `header:"int-header"` + StringAliasPtr *BlobType 
`header:"ptr-string-alias-header"` + StringAlias BlobType `header:"string-alias-header"` + NilPtr *time.Time `header:"nil-ptr"` + EmptyString string `header:"empty-string"` } - h := t{header1: "value1", header2: "value2"} + timeHeader := time.Date(1985, time.February, 23, 10, 0, 0, 0, time.Local) + uintHeader := uint(15) + intHeader := 30 + alias := BlobTypeAppend + h := t{ + Header1: "value1", + Header2: "value2", + TimePtr: &timeHeader, + TimeHeader: timeHeader, + UintPtr: &uintHeader, + UintHeader: uintHeader, + IntPtr: &intHeader, + IntHeader: intHeader, + StringAliasPtr: &alias, + StringAlias: alias, + } expected := map[string]string{ - "HEADER1": "value1", - "HEADER2": "value2", + "HEADER1": "value1", + "HEADER2": "value2", + "ptr-time-header": "Sat, 23 Feb 1985 10:00:00 GMT", + "time-header": "Sat, 23 Feb 1985 10:00:00 GMT", + "ptr-uint-header": "15", + "uint-header": "15", + "ptr-int-header": "30", + "int-header": "30", + "ptr-string-alias-header": "AppendBlob", + "string-alias-header": "AppendBlob", } out := headersFromStruct(h) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go new file mode 100644 index 000000000000..a23fff1e2e15 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -0,0 +1,5 @@ +package storage + +var ( + sdkVersion = "10.0.2" +) diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers.go similarity index 100% rename from vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go rename to vendor/github.com/Azure/go-ansiterm/parser_test_helpers.go diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities.go similarity index 100% rename from vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go rename to 
vendor/github.com/Azure/go-ansiterm/parser_test_utilities.go diff --git a/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go b/vendor/github.com/Azure/go-ansiterm/test_event_handler.go similarity index 100% rename from vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go rename to vendor/github.com/Azure/go-ansiterm/test_event_handler.go diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 000000000000..a17cf98c6215 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,253 @@ +# Azure Active Directory library for Go + +This project provides a stand alone Azure Active Directory library for Go. The code was extracted +from [go-autorest](https://github.com/Azure/go-autorest/) project, which is used as a base for +[azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go). + + +## Installation + +``` +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) follow these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. 
Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate containing also the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content form `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending of your needs. + +``` +az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. 
+ +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + oauthConfig, + appliationID, + applicationSecret, + resource, + callbacks...) +if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) 
+ +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
+ +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000000..12375e0e4bb8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,51 @@ +package adal + +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if 
err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go new file mode 100644 index 000000000000..e8a58809eb74 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go @@ -0,0 +1,30 @@ +package adal + +import ( + "testing" +) + +func TestNewOAuthConfig(t *testing.T) { + const testActiveDirectoryEndpoint = "https://login.test.com" + const testTenantID = "tenant-id-test" + + config, err := NewOAuthConfig(testActiveDirectoryEndpoint, testTenantID) + if err != nil { + t.Fatalf("autorest/adal: Unexpected error while creating oauth configuration for tenant: %v.", err) + } + + expected := "https://login.test.com/tenant-id-test/oauth2/authorize?api-version=1.0" + if config.AuthorizeEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/token?api-version=1.0" + if config.TokenEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/devicecode?api-version=1.0" + if config.DeviceCodeEndpoint.String() != expected { + t.Fatalf("autorest/adal Incorrect devicecode url for Tenant from Environment. expected(%s). 
actual(%v).", expected, config.DeviceCodeEndpoint) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go similarity index 64% rename from vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index e1d5498a80f1..6c511f8c8779 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -1,4 +1,4 @@ -package azure +package adal /* This file is largely based on rjw57/oauth2device's code, with the follow differences: @@ -10,16 +10,17 @@ package azure */ import ( + "encoding/json" "fmt" + "io/ioutil" "net/http" "net/url" + "strings" "time" - - "github.com/Azure/go-autorest/autorest" ) const ( - logPrefix = "autorest/azure/devicetoken:" + logPrefix = "autorest/adal/devicetoken:" ) var ( @@ -38,10 +39,17 @@ var ( // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" errTokenSendingFails = "Error occurred while sending request with device code for a token" errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK 
= "Error HTTP status != 200" ) // DeviceCode is the object returned by the device auth endpoint @@ -79,31 +87,45 @@ type deviceToken struct { // InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode // that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - }), - ) - - resp, err := autorest.SendWithSender(client, req) +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty } var code DeviceCode - err = autorest.Respond( - resp, - 
autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&code), - autorest.ByClosing()) + err = json.Unmarshal(rb, &code) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) } code.ClientID = clientID @@ -115,33 +137,46 @@ func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, client // CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint // to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { - req, _ := autorest.Prepare( - &http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), - autorest.WithFormData(url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - }), - ) - - resp, err := autorest.SendWithSender(client, req) +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) if err != nil { - 
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty } var token deviceToken - err = autorest.Respond( - resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), - autorest.ByUnmarshallingJSON(&token), - autorest.ByClosing()) + err = json.Unmarshal(rb, &token) if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) } if token.Error == nil { @@ -164,12 +199,12 @@ func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, // WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. // This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
-func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { intervalDuration := time.Duration(*code.Interval) * time.Second waitDuration := intervalDuration for { - token, err := CheckForUserCompletion(client, code) + token, err := CheckForUserCompletion(sender, code) if err == nil { return token, nil diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go similarity index 62% rename from vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go index ab8a7889315e..f7bf0a79dee1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "encoding/json" @@ -7,18 +7,18 @@ import ( "strings" "testing" - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/mocks" ) const ( - TestResource = "SomeResource" - TestClientID = "SomeClientID" - TestTenantID = "SomeTenantID" + TestResource = "SomeResource" + TestClientID = "SomeClientID" + TestTenantID = "SomeTenantID" + TestActiveDirectoryEndpoint = "https://login.test.com/" ) var ( - testOAuthConfig, _ = PublicCloud.OAuthConfigForTenant(TestTenantID) + testOAuthConfig, _ = NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) TestOAuthConfig = *testOAuthConfig ) @@ -46,38 +46,35 @@ const MockDeviceTokenResponse = `{ func TestDeviceCodeIncludesResource(t *testing.T) { sender := mocks.NewSender() sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) - client := &autorest.Client{Sender: sender} - code, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + code, err := InitiateDeviceAuth(sender, 
TestOAuthConfig, TestClientID, TestResource) if err != nil { - t.Fatalf("azure: unexpected error initiating device auth") + t.Fatalf("adal: unexpected error initiating device auth") } if code.Resource != TestResource { - t.Fatalf("azure: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") + t.Fatalf("adal: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") } } func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { sender := mocks.NewSender() sender.SetError(fmt.Errorf("this is an error")) - client := &autorest.Client{Sender: sender} - _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) } } func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody("doesn't matter") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) } if body.IsOpen() { @@ -89,12 +86,26 @@ func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { gibberishJSON 
:= strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) sender := mocks.NewSender() body := mocks.NewBody(gibberishJSON) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfEmptyDeviceCode(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err != ErrDeviceCodeEmpty { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", ErrDeviceCodeEmpty, err.Error()) } if body.IsOpen() { @@ -104,7 +115,7 @@ func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { func deviceCode() *DeviceCode { var deviceCode DeviceCode - json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) + _ = json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) deviceCode.Resource = TestResource deviceCode.ClientID = TestClientID return &deviceCode @@ -113,12 +124,11 @@ func deviceCode() *DeviceCode { func TestDeviceTokenReturns(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody(MockDeviceTokenResponse) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) - client := 
&autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err != nil { - t.Fatalf("azure: got error unexpectedly") + t.Fatalf("adal: got error unexpectedly") } if body.IsOpen() { @@ -129,23 +139,21 @@ func TestDeviceTokenReturns(t *testing.T) { func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { sender := mocks.NewSender() sender.SetError(fmt.Errorf("this is an error")) - client := &autorest.Client{Sender: sender} - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) } } func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody("") - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 500, "Internal Server Error")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusInternalServerError, "Internal Server Error")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) } if body.IsOpen() { @@ -157,12 +165,11 @@ func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", 
";:\"gibberish", -1) sender := mocks.NewSender() body := mocks.NewBody(gibberishJSON) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { - t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) } if body.IsOpen() { @@ -177,10 +184,9 @@ func errorDeviceTokenResponse(message string) string { func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := CheckForUserCompletion(client, deviceCode()) + _, err := CheckForUserCompletion(sender, deviceCode()) if err != ErrDeviceAuthorizationPending { t.Fatalf("!!!") } @@ -193,10 +199,9 @@ func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := CheckForUserCompletion(client, deviceCode()) + _, err := CheckForUserCompletion(sender, deviceCode()) if err != ErrDeviceSlowDown { 
t.Fatalf("!!!") } @@ -230,9 +235,8 @@ func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) { // but with the intent of showing that WaitForUserCompletion loops properly. func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { sender := newDeviceTokenSender("authorization_pending") - client := &autorest.Client{Sender: sender} - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err != nil { t.Fatalf("unexpected error occurred") } @@ -241,9 +245,8 @@ func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { // same as above but with SlowDown now func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { sender := newDeviceTokenSender("slow_down") - client := &autorest.Client{Sender: sender} - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err != nil { t.Fatalf("unexpected error occurred") } @@ -252,12 +255,11 @@ func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err != ErrDeviceAccessDenied { - t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) } if body.IsOpen() { @@ -268,12 +270,11 @@ func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { 
sender := mocks.NewSender() body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err != ErrDeviceCodeExpired { - t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) } if body.IsOpen() { @@ -284,15 +285,29 @@ func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { sender := mocks.NewSender() body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) - sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) - client := &autorest.Client{Sender: sender} + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) - _, err := WaitForUserCompletion(client, deviceCode()) + _, err := WaitForUserCompletion(sender, deviceCode()) if err == nil { t.Fatalf("failed to get error") } if err != ErrDeviceGeneric { - t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfTokenEmptyAndStatusOK(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrOAuthTokenEmpty { + t.Fatalf("adal: got wrong error 
expected(%s) actual(%s)", ErrOAuthTokenEmpty.Error(), err.Error()) } if body.IsOpen() { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go similarity index 99% rename from vendor/github.com/Azure/go-autorest/autorest/azure/persist.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/persist.go index d5cf62ddc7ba..73711c6674ea 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go similarity index 99% rename from vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go index bf5cb6453144..12c7ecbbbaeb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "encoding/json" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 000000000000..7928c971abbe --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,46 @@ +package adal + +import ( + "net/http" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. 
+type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). 
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go similarity index 69% rename from vendor/github.com/Azure/go-autorest/autorest/azure/token.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/token.go index cfcd030114c6..559fc6653583 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "crypto/rand" @@ -6,13 +6,15 @@ import ( "crypto/sha1" "crypto/x509" "encoding/base64" + "encoding/json" "fmt" + "io/ioutil" "net/http" "net/url" "strconv" + "strings" "time" - "github.com/Azure/go-autorest/autorest" "github.com/dgrijalva/jwt-go" ) @@ -28,6 +30,9 @@ const ( // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" + + // managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint) + managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings" ) var expirationBase time.Time @@ -36,6 +41,18 @@ func init() { expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) } +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + // TokenRefreshCallback is the type representing callbacks that will be called after // a successful token refresh type TokenRefreshCallback func(Token) error @@ -73,14 +90,9 @@ func (t Token) WillExpireIn(d time.Duration) bool { return !t.Expires().After(time.Now().Add(d)) } 
-// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the Token. -func (t *Token) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) - }) - } +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken } // ServicePrincipalNoSecret represents a secret type that contains no secret @@ -118,6 +130,17 @@ type ServicePrincipalCertificateSecret struct { PrivateKey *rsa.PrivateKey } +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// MSI extension requires the authority field to be set to the real tenant authority endpoint +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("authority", spt.oauthConfig.AuthorityEndpoint.String()) + return nil +} + // SignJwt returns the JWT signed with the certificate's private key. func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { hasher := sha1.New() @@ -173,7 +196,7 @@ type ServicePrincipalToken struct { resource string autoRefresh bool refreshWithin time.Duration - sender autorest.Sender + sender Sender refreshCallbacks []TokenRefreshCallback } @@ -238,10 +261,56 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. 
+func NewServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(oauthConfig, resource, managedIdentitySettingsPath, callbacks...) +} + +func newServicePrincipalTokenFromMSI(oauthConfig OAuthConfig, resource, settingsPath string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + // Read MSI settings + bytes, err := ioutil.ReadFile(settingsPath) + if err != nil { + return nil, err + } + msiSettings := struct { + URL string `json:"url"` + }{} + err = json.Unmarshal(bytes, &msiSettings) + if err != nil { + return nil, err + } + + // We set the oauth config token endpoint to be MSI's endpoint + // We leave the authority as-is so MSI can POST it with the token request + msiEndpointURL, err := url.Parse(msiSettings.URL) + if err != nil { + return nil, err + } + + msiTokenEndpointURL, err := msiEndpointURL.Parse("/oauth2/token") + if err != nil { + return nil, err + } + + oauthConfig.TokenEndpoint = *msiTokenEndpointURL + + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: &ServicePrincipalMSISecret{}, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + + return spt, nil +} + // EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin). +// RefreshWithin) and autoRefresh flag is on. 
func (spt *ServicePrincipalToken) EnsureFresh() error { - if spt.WillExpireIn(spt.refreshWithin) { + if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) { return spt.Refresh() } return nil @@ -253,8 +322,7 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { for _, callback := range spt.refreshCallbacks { err := callback(spt.Token) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) } } } @@ -287,39 +355,40 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { } } - req, _ := autorest.Prepare(&http.Request{}, - autorest.AsPost(), - autorest.AsFormURLEncoded(), - autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), - autorest.WithFormData(v)) - - resp, err := autorest.SendWithSender(spt.sender, req) + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req, err := http.NewRequest(http.MethodPost, spt.oauthConfig.TokenEndpoint.String(), body) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", - spt.clientID) + return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) } - var newToken Token - err = autorest.Respond(resp, - autorest.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&newToken), - autorest.ByClosing()) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := spt.sender.Do(req) if err != nil { - return autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", - spt.clientID) + return fmt.Errorf("adal: Failed to execute the refresh request. 
Error = '%v'", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'", resp.StatusCode) } - spt.Token = newToken - - err = spt.InvokeRefreshCallbacks(newToken) + rb, err := ioutil.ReadAll(resp.Body) if err != nil { - // its already wrapped inside InvokeRefreshCallbacks - return err + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) } - return nil + spt.Token = token + + return spt.InvokeRefreshCallbacks(token) } // SetAutoRefresh enables or disables automatic refreshing of stale tokens. @@ -334,30 +403,6 @@ func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { return } -// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// SetSender sets the http.Client used when obtaining the Service Principal token. An // undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { - spt.sender = s -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. -// -// By default, the token will automatically refresh if nearly expired (as determined by the -// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing -// tokens. 
-func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - if spt.autoRefresh { - err := spt.EnsureFresh() - if err != nil { - return r, autorest.NewErrorWithError(err, - "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", - r.URL) - } - } - return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) - }) - } -} +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go similarity index 54% rename from vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go rename to vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go index 3d8990fa307e..9c92f4198765 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go @@ -1,4 +1,4 @@ -package azure +package adal import ( "crypto/rand" @@ -10,13 +10,13 @@ import ( "math/big" "net/http" "net/url" + "os" "reflect" "strconv" "strings" "testing" "time" - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/mocks" ) @@ -30,7 +30,7 @@ func TestTokenExpires(t *testing.T) { tk := newTokenExpiresAt(tt) if tk.Expires().Equal(tt) { - t.Fatalf("azure: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) + t.Fatalf("adal: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) } } @@ -38,7 +38,7 @@ func TestTokenIsExpired(t *testing.T) { tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) if !tk.IsExpired() { - t.Fatalf("azure: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", + t.Fatalf("adal: 
Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) } } @@ -47,7 +47,7 @@ func TestTokenIsExpiredUninitialized(t *testing.T) { tk := &Token{} if !tk.IsExpired() { - t.Fatalf("azure: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) + t.Fatalf("adal: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) } } @@ -55,7 +55,7 @@ func TestTokenIsNoExpired(t *testing.T) { tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) if tk.IsExpired() { - t.Fatalf("azure: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) + t.Fatalf("adal: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) } } @@ -64,18 +64,7 @@ func TestTokenWillExpireIn(t *testing.T) { tk := newTokenExpiresIn(d) if !tk.WillExpireIn(d) { - t.Fatal("azure: Token#WillExpireIn mismeasured expiration time") - } -} - -func TestTokenWithAuthorization(t *testing.T) { - tk := newToken() - - req, err := autorest.Prepare(&http.Request{}, tk.WithAuthorization()) - if err != nil { - t.Fatalf("azure: Token#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", tk.AccessToken) { - t.Fatal("azure: Token#WithAuthorization failed to set Authorization header") + t.Fatal("adal: Token#WillExpireIn mismeasured expiration time") } } @@ -83,12 +72,12 @@ func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { spt := newServicePrincipalToken() if !spt.autoRefresh { - t.Fatal("azure: ServicePrincipalToken did not default to automatic token refreshing") + t.Fatal("adal: ServicePrincipalToken did not default to automatic token refreshing") } spt.SetAutoRefresh(false) if spt.autoRefresh { - t.Fatal("azure: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") + 
t.Fatal("adal: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") } } @@ -96,46 +85,48 @@ func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { spt := newServicePrincipalToken() if spt.refreshWithin != defaultRefresh { - t.Fatal("azure: ServicePrincipalToken did not correctly set the default refresh interval") + t.Fatal("adal: ServicePrincipalToken did not correctly set the default refresh interval") } spt.SetRefreshWithin(2 * defaultRefresh) if spt.refreshWithin != 2*defaultRefresh { - t.Fatal("azure: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") + t.Fatal("adal: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") } } func TestServicePrincipalTokenSetSender(t *testing.T) { spt := newServicePrincipalToken() - var s autorest.Sender - s = mocks.NewSender() - spt.SetSender(s) - if !reflect.DeepEqual(s, spt.sender) { - t.Fatal("azure: ServicePrincipalToken#SetSender did not set the sender") + c := &http.Client{} + spt.SetSender(c) + if !reflect.DeepEqual(c, spt.sender) { + t.Fatal("adal: ServicePrincipalToken#SetSender did not set the sender") } } func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { spt := newServicePrincipalToken() - body := mocks.NewBody("") - resp := mocks.NewResponseWithBodyAndStatus(body, 200, "OK") + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { if r.Method != "POST" { - t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) + 
t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) } return resp, nil }) } })()) spt.SetSender(s) - spt.Refresh() + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } if body.IsOpen() { t.Fatalf("the response was not closed!") @@ -145,68 +136,86 @@ func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { spt := newServicePrincipalToken() + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { - t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", "application/x-form-urlencoded", r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) } - return mocks.NewResponse(), nil + return resp, nil }) } })()) spt.SetSender(s) - spt.Refresh() + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } } func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { spt := newServicePrincipalToken() + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() - s := autorest.DecorateSender(c, - 
(func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { - t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", TestOAuthConfig.TokenEndpoint, r.URL) } - return mocks.NewResponse(), nil + return resp, nil }) } })()) spt.SetSender(s) - spt.Refresh() + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } } func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { b, err := ioutil.ReadAll(r.Body) if err != nil { - t.Fatalf("azure: Failed to read body of Service Principal token request (%v)", err) + t.Fatalf("adal: Failed to read body of Service Principal token request (%v)", err) } f(t, b) - return mocks.NewResponse(), nil + return resp, nil }) } })()) spt.SetSender(s) - spt.Refresh() + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } } func TestServicePrincipalTokenManualRefreshSetsBody(t 
*testing.T) { sptManual := newServicePrincipalTokenManual() testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { if string(b) != defaultManualFormData { - t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", defaultManualFormData, string(b)) } }) @@ -222,7 +231,7 @@ func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { values["client_id"][0] != "id" || values["grant_type"][0] != "client_credentials" || values["resource"][0] != "resource" { - t.Fatalf("azure: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") + t.Fatalf("adal: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") } }) } @@ -231,7 +240,7 @@ func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { spt := newServicePrincipalToken() testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { if string(b) != defaultFormData { - t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", defaultFormData, string(b)) } @@ -241,21 +250,66 @@ func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { spt := newServicePrincipalToken() - resp := mocks.NewResponse() + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := 
DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { return resp, nil }) } })()) spt.SetSender(s) - spt.Refresh() - + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } if resp.Body.(*mocks.Body).IsOpen() { - t.Fatal("azure: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + t.Fatal("adal: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + } +} + +func TestServicePrincipalTokenRefreshRejectsResponsesWithStatusNotOK(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusUnauthorized, "Unauthorized") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh should reject a response with status != %d", http.StatusOK) + } +} + +func TestServicePrincipalTokenRefreshRejectsEmptyBody(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatal("adal: ServicePrincipalToken#Refresh should reject an empty token") } } @@ -268,7 +322,7 @@ func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { err := spt.Refresh() if err == nil { - t.Fatal("azure: Failed to propagate the request error") + t.Fatal("adal: Failed to propagate the request error") } } @@ -276,12 +330,12 @@ func 
TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { spt := newServicePrincipalToken() c := mocks.NewSender() - c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", 401)) + c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", http.StatusUnauthorized)) spt.SetSender(c) err := spt.Refresh() if err == nil { - t.Fatal("azure: Failed to return an when receiving a status code other than HTTP 200") + t.Fatalf("adal: Failed to return an when receiving a status code other than HTTP %d", http.StatusOK) } } @@ -292,10 +346,10 @@ func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { j := newTokenJSON(expiresOn, "resource") resp := mocks.NewResponseWithContent(j) c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { return resp, nil }) } @@ -304,14 +358,14 @@ func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { err := spt.Refresh() if err != nil { - t.Fatalf("azure: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) } else if spt.AccessToken != "accessToken" || spt.ExpiresIn != "3600" || spt.ExpiresOn != expiresOn || spt.NotBefore != expiresOn || spt.Resource != "resource" || spt.Type != "Bearer" { - t.Fatalf("azure: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", + t.Fatalf("adal: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", j, *spt) } } @@ -320,21 +374,27 @@ func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { spt := newServicePrincipalToken() expireToken(&spt.Token) + body := 
mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + f := false c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { f = true - return mocks.NewResponse(), nil + return resp, nil }) } })()) spt.SetSender(s) - spt.EnsureFresh() + err := spt.EnsureFresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) + } if !f { - t.Fatal("azure: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") + t.Fatal("adal: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") } } @@ -344,46 +404,22 @@ func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { f := false c := mocks.NewSender() - s := autorest.DecorateSender(c, - (func() autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { f = true return mocks.NewResponse(), nil }) } })()) spt.SetSender(s) - spt.EnsureFresh() - if f { - t.Fatal("azure: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") - } -} - -func TestServicePrincipalTokenWithAuthorization(t *testing.T) { - spt := newServicePrincipalToken() - setTokenToExpireIn(&spt.Token, 1000*time.Second) - r := mocks.NewRequest() - s := mocks.NewSender() - spt.SetSender(s) - - req, err := autorest.Prepare(r, spt.WithAuthorization()) + err := spt.EnsureFresh() if err != nil { - t.Fatalf("azure: 
ServicePrincipalToken#WithAuthorization returned an error (%v)", err) - } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { - t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to set Authorization header") + t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) } -} - -func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) { - spt := newServicePrincipalToken() - s := mocks.NewSender() - s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", 400)) - spt.SetSender(s) - - _, err := autorest.Prepare(mocks.NewRequest(), spt.WithAuthorization()) - if err == nil { - t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to return an error when refresh fails") + if f { + t.Fatal("adal: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") } } @@ -400,10 +436,12 @@ func TestRefreshCallback(t *testing.T) { j := newTokenJSON(expiresOn, "resource") sender.AppendResponse(mocks.NewResponseWithContent(j)) spt.SetSender(sender) - spt.Refresh() - + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } if !callbackTriggered { - t.Fatalf("azure: RefreshCallback failed to trigger call callback") + t.Fatalf("adal: RefreshCallback failed to trigger call callback") } } @@ -422,7 +460,7 @@ func TestRefreshCallbackErrorPropagates(t *testing.T) { err := spt.Refresh() if err == nil || !strings.Contains(err.Error(), errorText) { - t.Fatalf("azure: RefreshCallback failed to propagate error") + t.Fatalf("adal: RefreshCallback failed to propagate error") } } @@ -432,7 +470,53 @@ func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) { spt.RefreshToken = "" err := spt.Refresh() if err == nil { - t.Fatalf("azure: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") + 
t.Fatalf("adal: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") + } +} + +func TestNewServicePrincipalTokenFromMSI(t *testing.T) { + resource := "https://resource" + + cb := func(token Token) error { return nil } + tempSettingsFile, err := ioutil.TempFile("", "ManagedIdentity-Settings") + if err != nil { + t.Fatal("Couldn't write temp settings file") + } + defer os.Remove(tempSettingsFile.Name()) + + settingsContents := []byte(`{ + "url": "http://msiendpoint/" + }`) + + if _, err := tempSettingsFile.Write(settingsContents); err != nil { + t.Fatal("Couldn't fill temp settings file") + } + + oauthConfig, err := NewOAuthConfig("http://adendpoint", "1-2-3-4") + if err != nil { + t.Fatal("Failed to construct oauthconfig") + } + + spt, err := newServicePrincipalTokenFromMSI( + *oauthConfig, + resource, + tempSettingsFile.Name(), + cb) + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + // check some of the SPT fields + if _, ok := spt.secret.(*ServicePrincipalMSISecret); !ok { + t.Fatal("SPT secret was not of MSI type") + } + + if spt.resource != resource { + t.Fatal("SPT came back with incorrect resource") + } + + if len(spt.refreshCallbacks) != 1 { + t.Fatal("SPT had incorrect refresh callbacks.") } } @@ -498,6 +582,9 @@ func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken { BasicConstraintsValid: true, } privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) if err != nil { t.Fatal(err) diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000000..7f4e3d84540e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,57 @@ +package autorest + +import ( + "fmt" + "net/http" 
+ + "github.com/Azure/go-autorest/autorest/adal" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// BearerAuthorizer implements the bearer authorization +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +func (ba *BearerAuthorizer) withBearerAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. 
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + refresher, ok := ba.tokenProvider.(adal.Refresher) + if ok { + err := refresher.EnsureFresh() + if err != nil { + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil, + "Failed to refresh the Token for request to %s", r.URL) + } + } + return (ba.withBearerAuthorization()(p)).Prepare(r) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go new file mode 100644 index 000000000000..d3f63888013c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go @@ -0,0 +1,137 @@ +package autorest + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestTenantID = "TestTenantID" + TestActiveDirectoryEndpoint = "https://login/test.com/" +) + +func TestWithAuthorizer(t *testing.T) { + r1 := mocks.NewRequest() + + na := &NullAuthorizer{} + r2, err := Prepare(r1, + na.WithAuthorization()) + if err != nil { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) + } +} + +func TestTokenWithAuthorization(t *testing.T) { + token := &adal.Token{ + AccessToken: "TestToken", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } + + ba := NewBearerAuthorizer(token) + req, err := Prepare(&http.Request{}, ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", 
token.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationNoRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt.SetAutoRefresh(false) + s := mocks.NewSender() + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationRefresh(t *testing.T) { + + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + refreshed := false + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", func(t adal.Token) error { + refreshed = true + return nil + }) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + jwt := `{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "test", + "not_before" : "test", + "resource" : "test", + "token_type" : "Bearer" + }` + body := mocks.NewBody(jwt) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender 
{ + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } + + if !refreshed { + t.Fatal("azure: BearerAuthorizer#WithAuthorization must refresh the token") + } +} + +func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfConnotRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + s := mocks.NewSender() + s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest)) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + _, err = Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err == nil { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to return an error when refresh fails") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go index 9804f401ef9f..51f1c4bbcac2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -16,6 +16,7 @@ and Responding. A typical pattern is: DoRetryForAttempts(5, time.Second)) err = Respond(resp, + ByDiscardingBody(), ByClosing()) Each phase relies on decorators to modify and / or manage processing. 
Decorators may first modify diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index 280d32a61dde..332a8909d1a9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -3,12 +3,13 @@ package azure import ( "bytes" "fmt" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" "io/ioutil" "net/http" "strings" "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" ) const ( @@ -16,12 +17,6 @@ const ( ) const ( - methodDelete = "DELETE" - methodPatch = "PATCH" - methodPost = "POST" - methodPut = "PUT" - methodGet = "GET" - operationInProgress string = "InProgress" operationCanceled string = "Canceled" operationFailed string = "Failed" @@ -225,7 +220,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // Lastly, requests against an existing resource, use the last request URI if ps.uri == "" { m := strings.ToUpper(req.Method) - if m == methodPatch || m == methodPut || m == methodGet { + if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { ps.uri = req.URL.String() } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go index 0a06b0b15c5d..4c2c695fd903 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go @@ -2,8 +2,6 @@ package azure import ( "fmt" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/mocks" "io/ioutil" "net/http" "reflect" @@ -11,6 +9,9 @@ import ( "sync" "testing" "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" ) func TestGetAsyncOperation_ReturnsAzureAsyncOperationHeader(t *testing.T) { @@ -357,7 +358,7 
@@ func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *tes resp := newAsynchronousResponse() resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - resp.Request.Method = methodPatch + resp.Request.Method = http.MethodPatch ps := pollingState{} updatePollingState(resp, &ps) @@ -368,7 +369,7 @@ func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *tes } func TestUpdatePollingState_RecognizesLowerCaseHTTPVerbs(t *testing.T) { - for _, m := range []string{"patch", "put", "get"} { + for _, m := range []string{strings.ToLower(http.MethodPatch), strings.ToLower(http.MethodPut), strings.ToLower(http.MethodGet)} { resp := newAsynchronousResponse() resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) @@ -388,7 +389,7 @@ func TestUpdatePollingState_ReturnsAnErrorIfAsyncHeadersAreMissingForANewOrDelet resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) - for _, m := range []string{methodDelete, methodPost} { + for _, m := range []string{http.MethodDelete, http.MethodPost} { resp.Request.Method = m err := updatePollingState(resp, &pollingState{}) if err == nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/Azure/go-autorest/autorest/azure/config.go deleted file mode 100644 index bea30b0d67ea..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package azure - -import ( - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorizeEndpoint url.URL - TokenEndpoint url.URL - DeviceCodeEndpoint url.URL -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go 
b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 3af1b48d270d..1cf55651f2a4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -2,14 +2,9 @@ package azure import ( "fmt" - "net/url" "strings" ) -const ( - activeDirectoryAPIVersion = "1.0" -) - var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, "AZUREGERMANCLOUD": GermanCloud, @@ -35,6 +30,7 @@ type Environment struct { ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` } var ( @@ -56,6 +52,7 @@ var ( ServiceBusEndpointSuffix: "servicebus.azure.com", ServiceManagementVMDNSSuffix: "cloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", } // USGovernmentCloud is the cloud environment for the US Government @@ -76,6 +73,7 @@ var ( ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.io", } // ChinaCloud is the cloud environment operated in China @@ -85,7 +83,7 @@ var ( PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/?api-version=1.0", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", GalleryEndpoint: "https://gallery.chinacloudapi.cn/", KeyVaultEndpoint: "https://vault.azure.cn/", GraphEndpoint: "https://graph.chinacloudapi.cn/", @@ -96,6 +94,7 @@ var ( ServiceBusEndpointSuffix: 
"servicebus.chinacloudapi.net", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", } // GermanCloud is the cloud environment operated in Germany @@ -116,6 +115,7 @@ var ( ServiceBusEndpointSuffix: "servicebus.cloudapi.de", ServiceManagementVMDNSSuffix: "azurecloudapp.de", ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", } ) @@ -128,35 +128,3 @@ func EnvironmentFromName(name string) (Environment, error) { } return env, nil } - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls -func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { - return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) -} - -// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint -func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - template := "%s/oauth2/%s?api-version=%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go index 73d49429507f..a36b34b408e8 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go +++ 
b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go @@ -6,30 +6,6 @@ import ( "testing" ) -func TestOAuthConfigForTenant(t *testing.T) { - az := PublicCloud - - config, err := az.OAuthConfigForTenant("tenant-id-test") - if err != nil { - t.Fatalf("autorest/azure: Unexpected error while retrieving oauth configuration for tenant: %v.", err) - } - - expected := "https://login.microsoftonline.com/tenant-id-test/oauth2/authorize?api-version=1.0" - if config.AuthorizeEndpoint.String() != expected { - t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) - } - - expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/token?api-version=1.0" - if config.TokenEndpoint.String() != expected { - t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint) - } - - expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/devicecode?api-version=1.0" - if config.DeviceCodeEndpoint.String() != expected { - t.Fatalf("autorest/azure: Incorrect devicecode url for Tenant from Environment. expected(%s). 
actual(%v).", expected, config.DeviceCodeEndpoint) - } -} - func TestEnvironmentFromName(t *testing.T) { name := "azurechinacloud" if env, _ := EnvironmentFromName(name); env != ChinaCloud { @@ -94,7 +70,8 @@ func TestDeserializeEnvironment(t *testing.T) { "storageEndpointSuffix": "--storage-endpoint-suffix--", "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", - "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--" + "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--", + "containerRegistryDNSSuffix": "--container-registry-dns-suffix--" }` testSubject := Environment{} @@ -148,6 +125,9 @@ func TestDeserializeEnvironment(t *testing.T) { if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix { t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix) } + if "--container-registry-dns-suffix--" != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be \"--container-registry-dns-suffix--\", but got %q", testSubject.ContainerRegistryDNSSuffix) + } } func TestRoundTripSerialization(t *testing.T) { @@ -168,6 +148,7 @@ func TestRoundTripSerialization(t *testing.T) { ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", + ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", } bytes, err := json.Marshal(env) @@ -229,4 +210,7 @@ func TestRoundTripSerialization(t *testing.T) { if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix { t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix) } + if env.ContainerRegistryDNSSuffix != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be %q, but got %q", 
env.ContainerRegistryDNSSuffix, testSubject.ContainerRegistryDNSSuffix) + } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index b55b3d103534..b5f94b5c3c75 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -8,6 +8,7 @@ import ( "log" "net/http" "net/http/cookiejar" + "runtime" "time" ) @@ -22,13 +23,24 @@ const ( DefaultRetryAttempts = 3 ) -var statusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 -} +var ( + // defaultUserAgent builds a string containing the Go version, system archityecture and OS, + // and the go-autorest version. + defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Version(), + ) + + statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) const ( requestFormat = `HTTP Request Begin =================================================== @@ -140,13 +152,24 @@ type Client struct { // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed // string. 
func NewClientWithUserAgent(ua string) Client { - return Client{ + c := Client{ PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, RetryDuration: 30 * time.Second, - UserAgent: ua, + UserAgent: defaultUserAgent, + } + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) } // Do implements the Sender interface by invoking the active Sender after applying authorization. diff --git a/vendor/github.com/Azure/go-autorest/autorest/client_test.go b/vendor/github.com/Azure/go-autorest/autorest/client_test.go index fba3aa3b2d19..78a8a59baf09 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client_test.go @@ -111,10 +111,37 @@ func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { func TestNewClientWithUserAgent(t *testing.T) { ua := "UserAgent" c := NewClientWithUserAgent(ua) + completeUA := fmt.Sprintf("%s %s", defaultUserAgent, ua) - if c.UserAgent != ua { + if c.UserAgent != completeUA { t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", - ua, c.UserAgent) + completeUA, c.UserAgent) + } +} + +func TestAddToUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + ext := "extension" + err := c.AddToUserAgent(ext) + if err != nil { + t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err) + } + completeUA := fmt.Sprintf("%s %s %s", defaultUserAgent, ua, ext) + + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } + + err = 
c.AddToUserAgent("") + if err == nil { + t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil", + fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)) + } + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000000..e085c77eea50 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,109 @@ +package date + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. 
The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. 
+func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go new file mode 100644 index 000000000000..3d18fd6008ab --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go @@ -0,0 +1,267 @@ +package date + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "testing" + "time" +) + +func ExampleUnixTime_MarshalJSON() { + epoch := UnixTime(UnixEpoch()) + text, _ := json.Marshal(epoch) + fmt.Print(string(text)) + // Output: 0 +} + +func ExampleUnixTime_UnmarshalJSON() { + var myTime UnixTime + json.Unmarshal([]byte("1.3e2"), &myTime) + fmt.Printf("%v", time.Time(myTime)) + // Output: 1970-01-01 00:02:10 +0000 UTC +} + +func TestUnixTime_MarshalJSON(t *testing.T) { + testCases := []time.Time{ + UnixEpoch().Add(-1 * time.Second), // One second befote the Unix Epoch + time.Date(2017, time.April, 14, 20, 27, 47, 0, time.UTC), // The time this test was written + UnixEpoch(), + time.Date(1800, 01, 01, 0, 0, 0, 0, time.UTC), + time.Date(2200, 12, 29, 00, 01, 37, 82, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + var actual, expected float64 + var marshaled []byte + + target := UnixTime(tc) + expected = float64(target.Duration().Nanoseconds()) / 1e9 + + if temp, err := json.Marshal(target); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + dec := json.NewDecoder(bytes.NewReader(marshaled)) + if err := dec.Decode(&actual); err != nil { + subT.Error(err) + return + } + + diff := math.Abs(actual - expected) + subT.Logf("\ngot :\t%g\nwant:\t%g\ndiff:\t%g", actual, expected, diff) + 
if diff > 1e-9 { //Must be within 1 nanosecond of one another + subT.Fail() + } + }) + } +} + +func TestUnixTime_UnmarshalJSON(t *testing.T) { + testCases := []struct { + text string + expected time.Time + }{ + {"1", UnixEpoch().Add(time.Second)}, + {"0", UnixEpoch()}, + {"1492203742", time.Date(2017, time.April, 14, 21, 02, 22, 0, time.UTC)}, // The time this test was written + {"-1", time.Date(1969, time.December, 31, 23, 59, 59, 0, time.UTC)}, + {"1.5", UnixEpoch().Add(1500 * time.Millisecond)}, + {"0e1", UnixEpoch()}, // See http://json.org for 'number' format definition. + {"1.3e+2", UnixEpoch().Add(130 * time.Second)}, + {"1.6E-10", UnixEpoch()}, // This is so small, it should get truncated into the UnixEpoch + {"2E-6", UnixEpoch().Add(2 * time.Microsecond)}, + {"1.289345e9", UnixEpoch().Add(1289345000 * time.Second)}, + {"1e-9", UnixEpoch().Add(time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run(tc.text, func(subT *testing.T) { + var rehydrated UnixTime + if err := json.Unmarshal([]byte(tc.text), &rehydrated); err != nil { + subT.Error(err) + return + } + + if time.Time(rehydrated) != tc.expected { + subT.Logf("\ngot: \t%v\nwant:\t%v\ndiff:\t%v", time.Time(rehydrated), tc.expected, time.Time(rehydrated).Sub(tc.expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_JSONRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + time.Date(2005, time.November, 5, 0, 0, 0, 0, time.UTC), // The day V for Vendetta (film) was released. 
+ UnixEpoch().Add(-6 * time.Second), + UnixEpoch().Add(800 * time.Hour), + UnixEpoch().Add(time.Nanosecond), + time.Date(2015, time.September, 05, 4, 30, 12, 9992, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + subject := UnixTime(tc) + var marshaled []byte + if temp, err := json.Marshal(subject); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := json.Unmarshal(marshaled, &unmarshaled); err != nil { + subT.Error(err) + } + + actual := time.Time(unmarshaled) + diff := actual.Sub(tc) + subT.Logf("\ngot :\t%s\nwant:\t%s\ndiff:\t%s", actual.String(), tc.String(), diff.String()) + + if diff > time.Duration(100) { // We lose some precision be working in floats. We shouldn't lose more than 100 nanoseconds. + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalBinary(t *testing.T) { + testCases := []struct { + expected int64 + subject time.Time + }{ + {0, UnixEpoch()}, + {-15 * int64(time.Second), UnixEpoch().Add(-15 * time.Second)}, + {54, UnixEpoch().Add(54 * time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc.subject).MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled int64 + if err := binary.Read(bytes.NewReader(marshaled), binary.LittleEndian, &unmarshaled); err != nil { + subT.Error(err) + return + } + + if unmarshaled != tc.expected { + subT.Logf("\ngot: \t%d\nwant:\t%d", unmarshaled, tc.expected) + subT.Fail() + } + }) + } +} + +func TestUnixTime_BinaryRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(800 * time.Minute), + UnixEpoch().Add(7 * time.Hour), + UnixEpoch().Add(-1 * time.Nanosecond), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + original := UnixTime(tc) + var marshaled []byte + + if temp, err := 
original.MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var traveled UnixTime + if err := traveled.UnmarshalBinary(marshaled); err != nil { + subT.Error(err) + return + } + + if traveled != original { + subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(original).String(), time.Time(traveled).String()) + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalText(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(45 * time.Second), + UnixEpoch().Add(time.Nanosecond), + UnixEpoch().Add(-100000 * time.Second), + } + + for _, tc := range testCases { + expected, _ := tc.MarshalText() + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc).MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + if string(marshaled) != string(expected) { + subT.Logf("\ngot: \t%s\nwant:\t%s", string(marshaled), string(expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_TextRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(-1 * time.Nanosecond), + UnixEpoch().Add(1 * time.Nanosecond), + time.Date(2017, time.April, 17, 21, 00, 00, 00, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + unixTC := UnixTime(tc) + + var marshaled []byte + + if temp, err := unixTC.MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := unmarshaled.UnmarshalText(marshaled); err != nil { + subT.Error(err) + return + } + + if unmarshaled != unixTC { + t.Logf("\ngot: \t%s\nwant:\t%s", time.Time(unmarshaled).String(), tc.String()) + t.Fail() + } + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 5b2c52704a29..afd114821bc7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ 
b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -183,6 +183,16 @@ func WithBaseURL(baseURL string) PrepareDecorator { } } +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + // WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the // http.Request body. func WithFormData(v url.Values) PrepareDecorator { @@ -416,18 +426,3 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato }) } } - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. 
-func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go index 1b36379aa7df..1f715f9b2963 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go @@ -149,6 +149,28 @@ func ExampleWithBaseURL_second() { // Output: parse :: missing protocol scheme } +func ExampleWithCustomBaseURL() { + r, err := Prepare(&http.Request{}, + WithCustomBaseURL("https://{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://myaccount.blob.core.windows.net/ +} + +func ExampleWithCustomBaseURL_second() { + _, err := Prepare(&http.Request{}, + WithCustomBaseURL(":", map[string]interface{}{})) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + // Create a request with a custom HTTP header func ExampleWithHeader() { r, err := Prepare(&http.Request{}, @@ -254,6 +276,31 @@ func ExampleWithQueryParameters() { // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 } +func TestWithCustomBaseURL(t *testing.T) { + r, err := Prepare(&http.Request{}, WithCustomBaseURL("https://{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err != nil { + t.Fatalf("autorest: WithCustomBaseURL should not fail") + } + if r.URL.String() != "https://myaccount.blob.core.windows.net/" { + t.Fatalf("autorest: WithCustomBaseURL expected https://myaccount.blob.core.windows.net/, got %s", r.URL) + } +} + +func TestWithCustomBaseURLwithInvalidURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithCustomBaseURL("hello/{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": 
"myaccount", + "service": "blob", + })) + if err == nil { + t.Fatalf("autorest: WithCustomBaseURL should fail fo URL parse error") + } +} + func TestWithPathWithInvalidPath(t *testing.T) { p := "path%2*end" if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil { @@ -703,16 +750,3 @@ func TestModifyingExistingRequest(t *testing.T) { t.Fatalf("autorest: Preparing an existing request failed (%s)", r.URL) } } - -func TestWithAuthorizer(t *testing.T) { - r1 := mocks.NewRequest() - - na := &NullAuthorizer{} - r2, err := Prepare(r1, - na.WithAuthorization()) - if err != nil { - t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) - } else if !reflect.DeepEqual(r1, r2) { - t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go index 07cd7ef5cc8c..87f71e5854b5 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -5,6 +5,7 @@ import ( "encoding/json" "encoding/xml" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -87,6 +88,24 @@ func ByCopying(b *bytes.Buffer) RespondDecorator { } } +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. 
+func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + // ByClosing returns a RespondDecorator that first invokes the passed Responder after which it // closes the response body. Since the passed Responder is invoked prior to closing the response // body, the decorator may occur anywhere within the set. @@ -128,6 +147,8 @@ func ByUnmarshallingJSON(v interface{}) RespondDecorator { err := r.Respond(resp) if err == nil { b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) if errInner != nil { err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) } else if len(strings.Trim(string(b), " ")) > 0 { diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go index 5355cc63fe28..99b858b9873b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io/ioutil" "net/http" "reflect" "strings" @@ -19,6 +20,7 @@ func ExampleWithErrorUnlessOK() { // Respond and leave the response body open (for a subsequent responder to close) err := Respond(r, WithErrorUnlessOK(), + ByDiscardingBody(), ByClosingIfError()) if err == nil { @@ -26,6 +28,7 @@ func ExampleWithErrorUnlessOK() { // Complete handling the response and close the body Respond(r, + ByDiscardingBody(), ByClosing()) } // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 @@ -328,6 +331,63 @@ func 
TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { } } +func TestByDiscardingBody(t *testing.T) { + r := mocks.NewResponse() + err := Respond(r, + ByDiscardingBody()) + if err != nil { + t.Fatalf("autorest: ByDiscardingBody failed (%v)", err) + } + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: Reading result of ByDiscardingBody failed (%v)", err) + } + + if len(buf) != 0 { + t.Logf("autorest: Body was not empty after calling ByDiscardingBody.") + t.Fail() + } +} + +func TestByDiscardingBodyAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByDiscardingBody()) +} + +func TestByDiscardingBodyAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByDiscardingBody()) +} + func TestByUnmarshallingJSON(t *testing.T) { v := &mocks.T{} r := mocks.NewResponseWithContent(jsonT) diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index a12f0f7ff551..9c0697815bba 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -97,7 +97,7 @@ func DoCloseIfError() SendDecorator { return SenderFunc(func(r *http.Request) (*http.Response, error) { resp, err := s.Do(r) if err != nil { - Respond(resp, ByClosing()) + Respond(resp, ByDiscardingBody(), ByClosing()) } return resp, err }) @@ -156,6 +156,7 @@ func DoPollForStatusCodes(duration time.Duration, delay 
time.Duration, codes ... for err == nil && ResponseHasStatusCode(resp, codes...) { Respond(resp, + ByDiscardingBody(), ByClosing()) resp, err = SendWithSender(s, r, AfterDelay(GetRetryAfter(resp, delay))) diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go index 4c462dda6a51..d8e7beddbd75 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go @@ -36,6 +36,7 @@ func ExampleSendWithSender() { DoRetryForAttempts(5, time.Duration(0))) Respond(r, + ByDiscardingBody(), ByClosing()) // Output: @@ -61,6 +62,7 @@ func ExampleDoRetryForAttempts() { DoRetryForAttempts(5, time.Duration(0))) Respond(r, + ByDiscardingBody(), ByClosing()) fmt.Printf("Retry stopped after %d attempts", client.Attempts()) @@ -78,6 +80,7 @@ func ExampleDoErrorIfStatusCode() { DoRetryForAttempts(5, time.Duration(0))) Respond(r, + ByDiscardingBody(), ByClosing()) fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) @@ -97,6 +100,7 @@ func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) if s != "abc" { @@ -155,6 +159,7 @@ func TestAfterDelayWaits(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -194,6 +199,7 @@ func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -212,8 +218,10 @@ func TestAsIs(t *testing.T) { } Respond(r1, + ByDiscardingBody(), ByClosing()) Respond(r2, + ByDiscardingBody(), ByClosing()) } @@ -230,6 +238,7 @@ func TestDoCloseIfError(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -282,6 +291,7 @@ func TestDoErrorIfStatusCode(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -297,6 +307,7 @@ func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -312,6 +323,7 @@ 
func TestDoErrorUnlessStatusCode(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -327,6 +339,7 @@ func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -344,6 +357,7 @@ func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -359,6 +373,7 @@ func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) if client.Attempts() != 5 { @@ -381,6 +396,7 @@ func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -398,6 +414,7 @@ func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -419,6 +436,7 @@ func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -440,6 +458,7 @@ func TestDoRetryForDurationStopsWithinReason(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -459,6 +478,7 @@ func TestDoRetryForDurationReturnsResponse(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -512,6 +532,7 @@ func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -527,6 +548,7 @@ func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -547,6 +569,7 @@ func TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { r, _ := SendWithSender(client, mocks.NewRequest(), DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) Respond(r, + ByDiscardingBody(), ByClosing()) }() wg.Wait() @@ -571,6 +594,7 @@ func TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *t } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -586,6 +610,7 @@ func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { } Respond(r, + 
ByDiscardingBody(), ByClosing()) } @@ -603,6 +628,7 @@ func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -620,6 +646,7 @@ func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -636,6 +663,7 @@ func TestWithLogging_Logs(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -657,6 +685,7 @@ func TestWithLogging_HandlesMissingResponse(t *testing.T) { } Respond(r, + ByDiscardingBody(), ByClosing()) } @@ -670,6 +699,7 @@ func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { ) Respond(r, + ByDiscardingBody(), ByClosing()) if client.Attempts() != 3 { @@ -686,6 +716,7 @@ func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { DoRetryForStatusCodes(2, time.Duration(2*time.Second), http.StatusGatewayTimeout), ) Respond(r, + ByDiscardingBody(), ByClosing()) if client.Attempts() != 3 { @@ -703,6 +734,7 @@ func TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { ) Respond(r, + ByDiscardingBody(), ByClosing()) if client.Attempts() != 1 || r.Status != "204 No Content" { @@ -720,6 +752,7 @@ func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { ) Respond(r, + ByDiscardingBody(), ByClosing()) if err == nil || client.Attempts() != 0 { diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index 8031a332cd2d..a222e8efaaf9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -1,18 +1,35 @@ package autorest import ( + "bytes" "fmt" + "strings" + "sync" ) const ( - major = "7" - minor = "0" - patch = "0" - tag = "" - semVerFormat = "%s.%s.%s%s" + major = 8 + minor = 0 + patch = 0 + tag = "" ) +var once sync.Once +var version string + // Version returns the semantic version (see http://semver.org). 
func Version() string { - return fmt.Sprintf(semVerFormat, major, minor, patch, tag) + once.Do(func() { + semver := fmt.Sprintf("%d.%d.%d", major, minor, patch) + verBuilder := bytes.NewBufferString(semver) + if tag != "" && tag != "-" { + updated := strings.TrimPrefix(tag, "-") + _, err := verBuilder.WriteString("-" + updated) + if err == nil { + verBuilder = bytes.NewBufferString(semver) + } + } + version = verBuilder.String() + }) + return version } diff --git a/vendor/github.com/Azure/go-autorest/autorest/version_test.go b/vendor/github.com/Azure/go-autorest/autorest/version_test.go deleted file mode 100644 index 14925fe4e53f..000000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/version_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package autorest - -import ( - "testing" -) - -func TestVersion(t *testing.T) { - v := "7.0.0" - if Version() != v { - t.Fatalf("autorest: Version failed to return the expected version -- expected %s, received %s", - v, Version()) - } -} diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go index c3f512151336..48717833cabb 100644 --- a/vendor/github.com/coreos/go-oidc/http/http.go +++ b/vendor/github.com/coreos/go-oidc/http/http.go @@ -91,7 +91,12 @@ func expires(date, expires string) (time.Duration, bool, error) { return 0, false, nil } - te, err := time.Parse(time.RFC1123, expires) + var te time.Time + var err error + if expires == "0" { + return 0, false, nil + } + te, err = time.Parse(time.RFC1123, expires) if err != nil { return 0, false, err } diff --git a/vendor/github.com/coreos/go-oidc/http/http_test.go b/vendor/github.com/coreos/go-oidc/http/http_test.go index dc2cabff7955..48e723ab31dd 100644 --- a/vendor/github.com/coreos/go-oidc/http/http_test.go +++ b/vendor/github.com/coreos/go-oidc/http/http_test.go @@ -177,6 +177,13 @@ func TestExpiresPass(t *testing.T) { wantTTL: 0, wantOK: false, }, + // Expires set to false + { + date: "Thu, 01 Dec 1983 22:00:00 GMT", + 
exp: "0", + wantTTL: 0, + wantOK: false, + }, // Expires < Date { date: "Fri, 02 Dec 1983 01:00:00 GMT", diff --git a/vendor/github.com/coreos/go-oidc/oidc/provider_test.go b/vendor/github.com/coreos/go-oidc/oidc/provider_test.go index 9b39f92ccefc..b36e5ba33732 100644 --- a/vendor/github.com/coreos/go-oidc/oidc/provider_test.go +++ b/vendor/github.com/coreos/go-oidc/oidc/provider_test.go @@ -473,8 +473,9 @@ func (g *fakeProviderConfigGetterSetter) Set(cfg ProviderConfig) error { } type fakeProviderConfigHandler struct { - cfg ProviderConfig - maxAge time.Duration + cfg ProviderConfig + maxAge time.Duration + noExpires bool } func (s *fakeProviderConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -482,6 +483,9 @@ func (s *fakeProviderConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Req if s.maxAge.Seconds() >= 0 { w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", int(s.maxAge.Seconds()))) } + if s.noExpires { + w.Header().Set("Expires", "0") + } w.Header().Set("Content-Type", "application/json") w.Write(b) } @@ -552,10 +556,11 @@ func TestHTTPProviderConfigGetter(t *testing.T) { now := fc.Now().UTC() tests := []struct { - dsc string - age time.Duration - cfg ProviderConfig - ok bool + dsc string + age time.Duration + cfg ProviderConfig + noExpires bool + ok bool }{ // everything is good { @@ -596,6 +601,17 @@ func TestHTTPProviderConfigGetter(t *testing.T) { }, ok: true, }, + // An expires header set to 0 + { + dsc: "https://example.com", + age: time.Minute, + cfg: ProviderConfig{ + Issuer: &url.URL{Scheme: "https", Host: "example.com"}, + ExpiresAt: now.Add(time.Minute), + }, + ok: true, + noExpires: true, + }, } for i, tt := range tests { diff --git a/vendor/github.com/satori/uuid/.travis.yml b/vendor/github.com/satori/uuid/.travis.yml new file mode 100644 index 000000000000..fdf960e86b55 --- /dev/null +++ b/vendor/github.com/satori/uuid/.travis.yml @@ -0,0 +1,22 @@ +language: go +sudo: false +go: + - 1.2 + - 1.3 + - 1.4 
+ - 1.5 + - 1.6 + - 1.7 + - 1.8 + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +notifications: + email: false diff --git a/vendor/github.com/satori/uuid/LICENSE b/vendor/github.com/satori/uuid/LICENSE new file mode 100644 index 000000000000..488357b8af1f --- /dev/null +++ b/vendor/github.com/satori/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2016 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/satori/uuid/README.md b/vendor/github.com/satori/uuid/README.md new file mode 100644 index 000000000000..b6aad1c81303 --- /dev/null +++ b/vendor/github.com/satori/uuid/README.md @@ -0,0 +1,65 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. + +With 100% test coverage and benchmarks out of box. + +Supported versions: +* Version 1, based on timestamp and MAC address (RFC 4122) +* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) +* Version 3, based on MD5 hashing (RFC 4122) +* Version 4, based on random numbers (RFC 4122) +* Version 5, based on SHA-1 hashing (RFC 4122) + +## Installation + +Use the `go` command: + + $ go get github.com/satori/go.uuid + +## Requirements + +UUID package requires Go >= 1.2. + +## Example + +```go +package main + +import ( + "fmt" + "github.com/satori/go.uuid" +) + +func main() { + // Creating UUID Version 4 + u1 := uuid.NewV4() + fmt.Printf("UUIDv4: %s\n", u1) + + // Parsing UUID from string input + u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + fmt.Printf("Something gone wrong: %s", err) + } + fmt.Printf("Successfully parsed: %s", u2) +} +``` + +## Documentation + +[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. 
+ +## Links +* [RFC 4122](http://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) + +## Copyright + +Copyright (C) 2013-2016 by Maxim Bublis . + +UUID package released under MIT License. +See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/uuid/benchmarks_test.go b/vendor/github.com/satori/uuid/benchmarks_test.go new file mode 100644 index 000000000000..c3baeab8b262 --- /dev/null +++ b/vendor/github.com/satori/uuid/benchmarks_test.go @@ -0,0 +1,123 @@ +// Copyright (C) 2013-2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package uuid + +import ( + "testing" +) + +func BenchmarkFromBytes(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + for i := 0; i < b.N; i++ { + FromBytes(bytes) + } +} + +func BenchmarkFromString(b *testing.B) { + s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringUrn(b *testing.B) { + s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkFromStringWithBrackets(b *testing.B) { + s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + for i := 0; i < b.N; i++ { + FromString(s) + } +} + +func BenchmarkNewV1(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV1() + } +} + +func BenchmarkNewV2(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV2(DomainPerson) + } +} + +func BenchmarkNewV3(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV3(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkNewV4(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV4() + } +} + +func BenchmarkNewV5(b *testing.B) { + for i := 0; i < b.N; i++ { + NewV5(NamespaceDNS, "www.example.com") + } +} + +func BenchmarkMarshalBinary(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalBinary() + } +} + +func BenchmarkMarshalText(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + u.MarshalText() + } +} + +func BenchmarkUnmarshalBinary(b *testing.B) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalBinary(bytes) + } +} + +func BenchmarkUnmarshalText(b *testing.B) { + bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + u := UUID{} + for i := 0; i < b.N; i++ { + u.UnmarshalText(bytes) + } +} + +var sink string + +func BenchmarkMarshalToString(b *testing.B) { + u := NewV4() + for i := 0; i < b.N; i++ { + sink = u.String() + } +} 
diff --git a/vendor/github.com/satori/uuid/uuid.go b/vendor/github.com/satori/uuid/uuid.go new file mode 100644 index 000000000000..295f3fc2c57f --- /dev/null +++ b/vendor/github.com/satori/uuid/uuid.go @@ -0,0 +1,481 @@ +// Copyright (C) 2013-2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementation of Universally Unique Identifier (UUID). +// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and +// version 2 (as specified in DCE 1.1). +package uuid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "net" + "os" + "sync" + "time" +) + +// UUID layout variants. +const ( + VariantNCS = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. 
+const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +// Used in string method conversion +const dash byte = '-' + +// UUID v1/v2 storage. +var ( + storageMutex sync.Mutex + storageOnce sync.Once + epochFunc = unixTimeFunc + clockSequence uint16 + lastTime uint64 + hardwareAddr [6]byte + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +func initClockSequence() { + buf := make([]byte, 2) + safeRandom(buf) + clockSequence = binary.BigEndian.Uint16(buf) +} + +func initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + safeRandom(hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + hardwareAddr[0] |= 0x01 +} + +func initStorage() { + initClockSequence() + initHardwareAddr() +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [16]byte + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// The nil UUID is special form of UUID that is specified to have all +// 128 bits set to zero. 
+var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// And returns result of binary AND of two UUIDs. +func And(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] & u2[i] + } + return u +} + +// Or returns result of binary OR of two UUIDs. +func Or(u1 UUID, u2 UUID) UUID { + u := UUID{} + for i := 0; i < 16; i++ { + u[i] = u1[i] | u2[i] + } + return u +} + +// Equal returns true if u1 and u2 equals, otherwise returns false. +func Equal(u1 UUID, u2 UUID) bool { + return bytes.Equal(u1[:], u2[:]) +} + +// Version returns algorithm version used to generate UUID. +func (u UUID) Version() uint { + return uint(u[6] >> 4) +} + +// Variant returns UUID layout variant. +func (u UUID) Variant() uint { + switch { + case (u[8] & 0x80) == 0x00: + return VariantNCS + case (u[8]&0xc0)|0x80 == 0x80: + return VariantRFC4122 + case (u[8]&0xe0)|0xc0 == 0xc0: + return VariantMicrosoft + } + return VariantFuture +} + +// Bytes returns bytes slice representation of UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// Returns canonical string representation of UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = dash + hex.Encode(buf[9:13], u[4:6]) + buf[13] = dash + hex.Encode(buf[14:18], u[6:8]) + buf[18] = dash + hex.Encode(buf[19:23], u[8:10]) + buf[23] = dash + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits as described in RFC 4122. 
+func (u *UUID) SetVariant() { + u[8] = (u[8] & 0xbf) | 0x80 +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +func (u *UUID) UnmarshalText(text []byte) (err error) { + if len(text) < 32 { + err = fmt.Errorf("uuid: UUID string too short: %s", text) + return + } + + t := text[:] + braced := false + + if bytes.Equal(t[:9], urnPrefix) { + t = t[9:] + } else if t[0] == '{' { + braced = true + t = t[1:] + } + + b := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + if t[0] != '-' { + err = fmt.Errorf("uuid: invalid string format") + return + } + t = t[1:] + } + + if len(t) < byteGroup { + err = fmt.Errorf("uuid: UUID string too short: %s", text) + return + } + + if i == 4 && len(t) > byteGroup && + ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { + err = fmt.Errorf("uuid: UUID string too long: %s", text) + return + } + + _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) + if err != nil { + return + } + + t = t[byteGroup:] + b = b[byteGroup/2:] + } + + return +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != 16 { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} + +// Value implements the driver.Valuer interface. 
+func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == 16 { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// Returns UUID v1/v2 storage state. 
+// Returns epoch timestamp, clock sequence, and hardware address. +func getStorage() (uint64, uint16, []byte) { + storageOnce.Do(initStorage) + + storageMutex.Lock() + defer storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= lastTime { + clockSequence++ + } + lastTime = timeNow + + return timeNow, clockSequence, hardwareAddr[:] +} + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(1) + u.SetVariant() + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(2) + u.SetVariant() + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(3) + u.SetVariant() + + return u +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + u := UUID{} + safeRandom(u[:]) + u.SetVersion(4) + u.SetVariant() + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. 
+func NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(5) + u.SetVariant() + + return u +} + +// Returns UUID based on hashing of namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/uuid/uuid_test.go b/vendor/github.com/satori/uuid/uuid_test.go new file mode 100644 index 000000000000..56504808f95a --- /dev/null +++ b/vendor/github.com/satori/uuid/uuid_test.go @@ -0,0 +1,633 @@ +// Copyright (C) 2013, 2015 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package uuid + +import ( + "bytes" + "testing" +) + +func TestBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + if !bytes.Equal(u.Bytes(), bytes1) { + t.Errorf("Incorrect bytes representation for UUID: %s", u) + } +} + +func TestString(t *testing.T) { + if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" { + t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String()) + } +} + +func TestEqual(t *testing.T) { + if !Equal(NamespaceDNS, NamespaceDNS) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS) + } + + if Equal(NamespaceDNS, NamespaceURL) { + t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL) + } +} + +func TestOr(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + if !Equal(u, Or(u1, u2)) { + t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2)) + } +} + +func TestAnd(t *testing.T) { + u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} + u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} + + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if !Equal(u, And(u1, u2)) { + t.Errorf("Incorrect bitwise AND result %s", And(u1, u2)) + } +} + +func TestVersion(t *testing.T) { + u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u.Version() != 1 { + 
t.Errorf("Incorrect version for UUID: %d", u.Version()) + } +} + +func TestSetVersion(t *testing.T) { + u := UUID{} + u.SetVersion(4) + + if u.Version() != 4 { + t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version()) + } +} + +func TestVariant(t *testing.T) { + u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u1.Variant() != VariantNCS { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant()) + } + + u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u2.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant()) + } + + u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u3.Variant() != VariantMicrosoft { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant()) + } + + u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + + if u4.Variant() != VariantFuture { + t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant()) + } +} + +func TestSetVariant(t *testing.T) { + u := new(UUID) + u.SetVariant() + + if u.Variant() != VariantRFC4122 { + t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant()) + } +} + +func TestFromBytes(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1, err := FromBytes(b1) + if err != nil { + t.Errorf("Error parsing UUID from bytes: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + + _, err = FromBytes(b2) + if err == nil { + t.Errorf("Should return error parsing from empty 
byte slice, got %s", err) + } +} + +func TestMarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + b2, err := u.MarshalBinary() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.UnmarshalBinary(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.UnmarshalBinary(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestFromString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + _, err := FromString("") + if err == nil { + t.Errorf("Should return error trying to parse empty string, got %s", err) + } + + u1, err := FromString(s1) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + u2, err := FromString(s2) + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + if !Equal(u, u2) { + t.Errorf("UUIDs should be equal: %s and %s", u, u2) + } + + u3, err := FromString(s3) + if err != nil { + t.Errorf("Error 
parsing UUID from string: %s", err) + } + + if !Equal(u, u3) { + t.Errorf("UUIDs should be equal: %s and %s", u, u3) + } +} + +func TestFromStringShort(t *testing.T) { + // Invalid 35-character UUID string + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c" + + for i := len(s1); i >= 0; i-- { + _, err := FromString(s1[:i]) + if err == nil { + t.Errorf("Should return error trying to parse too short string, got %s", err) + } + } +} + +func TestFromStringLong(t *testing.T) { + // Invalid 37+ character UUID string + s := []string{ + "6ba7b810-9dad-11d1-80b4-00c04fd430c8=", + "6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f", + "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8", + } + + for _, str := range s { + _, err := FromString(str) + if err == nil { + t.Errorf("Should return error trying to parse too long string, passed %s", str) + } + } +} + +func TestFromStringInvalid(t *testing.T) { + // Invalid UUID string formats + s := []string{ + "6ba7b8109dad11d180b400c04fd430c8", + "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8", + "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "6ba7b8109-dad-11d1-80b4-00c04fd430c8", + "6ba7b810-9dad1-1d1-80b4-00c04fd430c8", + "6ba7b810-9dad-11d18-0b4-00c04fd430c8", + "6ba7b810-9dad-11d1-80b40-0c04fd430c8", + "6ba7b810+9dad+11d1+80b4+00c04fd430c8", + "6ba7b810-9dad11d180b400c04fd430c8", + "6ba7b8109dad-11d180b400c04fd430c8", + "6ba7b8109dad11d1-80b400c04fd430c8", + "6ba7b8109dad11d180b4-00c04fd430c8", + } + + for _, str := range s { + _, err := FromString(str) + if err == nil { + t.Errorf("Should return error trying to parse invalid string, passed %s", str) + } + } +} + +func TestFromStringOrNil(t *testing.T) { + u := FromStringOrNil("") + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) + } +} + +func TestFromBytesOrNil(t *testing.T) { + b := []byte{} + u := FromBytesOrNil(b) + if u != Nil { + t.Errorf("Should return Nil UUID on parse failure, got %s", u) 
+ } +} + +func TestMarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + b2, err := u.MarshalText() + if err != nil { + t.Errorf("Error marshaling UUID: %s", err) + } + + if !bytes.Equal(b1, b2) { + t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) + } +} + +func TestUnmarshalText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.UnmarshalText(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.UnmarshalText(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestValue(t *testing.T) { + u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + if err != nil { + t.Errorf("Error parsing UUID from string: %s", err) + } + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != u.String() { + t.Errorf("Wrong value returned, should be equal: %s and %s", val, u) + } +} + +func TestValueNil(t *testing.T) { + u := UUID{} + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != Nil.String() { + t.Errorf("Wrong value returned, should be equal to UUID.Nil: %s", val) + } +} + +func TestNullUUIDValueNil(t *testing.T) { + u := NullUUID{} + + val, err := u.Value() + if err != nil { + t.Errorf("Error getting UUID value: %s", err) + } + + if val != nil { + t.Errorf("Wrong value returned, should be nil: %s", val) + } +} + +func TestScanBinary(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := 
[]byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte{} + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) + } +} + +func TestScanString(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := UUID{} + err := u1.Scan(s1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + s2 := "" + u2 := UUID{} + + err = u2.Scan(s2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanText(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.Scan(b1) + if err != nil { + t.Errorf("Error unmarshaling UUID: %s", err) + } + + if !Equal(u, u1) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1) + } + + b2 := []byte("") + u2 := UUID{} + + err = u2.Scan(b2) + if err == nil { + t.Errorf("Should return error trying to unmarshal from empty string") + } +} + +func TestScanUnsupported(t *testing.T) { + u := UUID{} + + err := u.Scan(true) + if err == nil { + t.Errorf("Should return error trying to unmarshal from bool") + } +} + +func TestScanNil(t *testing.T) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + err := u.Scan(nil) + if err == nil { + t.Errorf("Error UUID shouldn't allow unmarshalling from nil") + } +} + +func TestNullUUIDScanValid(t *testing.T) { 
+ u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := NullUUID{} + err := u1.Scan(s1) + if err != nil { + t.Errorf("Error unmarshaling NullUUID: %s", err) + } + + if !u1.Valid { + t.Errorf("NullUUID should be valid") + } + + if !Equal(u, u1.UUID) { + t.Errorf("UUIDs should be equal: %s and %s", u, u1.UUID) + } +} + +func TestNullUUIDScanNil(t *testing.T) { + u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true} + + err := u.Scan(nil) + if err != nil { + t.Errorf("Error unmarshaling NullUUID: %s", err) + } + + if u.Valid { + t.Errorf("NullUUID should not be valid") + } + + if !Equal(u.UUID, Nil) { + t.Errorf("NullUUID value should be equal to Nil: %v", u) + } +} + +func TestNewV1(t *testing.T) { + u := NewV1() + + if u.Version() != 1 { + t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant()) + } + + u1 := NewV1() + u2 := NewV1() + + if Equal(u1, u2) { + t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2) + } + + oldFunc := epochFunc + epochFunc = func() uint64 { return 0 } + + u3 := NewV1() + u4 := NewV1() + + if Equal(u3, u4) { + t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4) + } + + epochFunc = oldFunc +} + +func TestNewV2(t *testing.T) { + u1 := NewV2(DomainPerson) + + if u1.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version()) + } + + if u1.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant()) + } + + u2 := NewV2(DomainGroup) + + if u2.Version() != 2 { + t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version()) + } + + if u2.Variant() != VariantRFC4122 { + t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant()) + } +} + +func 
TestNewV3(t *testing.T) { + u := NewV3(NamespaceDNS, "www.example.com") + + if u.Version() != 3 { + t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant()) + } + + if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u = NewV3(NamespaceDNS, "python.org") + + if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" { + t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) + } + + u1 := NewV3(NamespaceDNS, "golang.org") + u2 := NewV3(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV3(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV3(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} + +func TestNewV4(t *testing.T) { + u := NewV4() + + if u.Version() != 4 { + t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant()) + } +} + +func TestNewV5(t *testing.T) { + u := NewV5(NamespaceDNS, "www.example.com") + + if u.Version() != 5 { + t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version()) + } + + if u.Variant() != VariantRFC4122 { + t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant()) + } + + u = NewV5(NamespaceDNS, "python.org") + + if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" { + t.Errorf("UUIDv5 generated incorrectly: %s", u.String()) + } + + u1 := NewV5(NamespaceDNS, "golang.org") + u2 := NewV5(NamespaceDNS, "golang.org") + if !Equal(u1, u2) { + t.Errorf("UUIDv5 generated 
different UUIDs for same namespace and name: %s and %s", u1, u2) + } + + u3 := NewV5(NamespaceDNS, "example.com") + if Equal(u1, u3) { + t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) + } + + u4 := NewV5(NamespaceURL, "golang.org") + if Equal(u1, u4) { + t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) + } +} diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go index 99687b46f411..40c80fe84392 100644 --- a/vendor/gopkg.in/gcfg.v1/doc.go +++ b/vendor/gopkg.in/gcfg.v1/doc.go @@ -48,6 +48,9 @@ // When using a map, and there is a section with the same section name but // without a subsection name, its values are stored with the empty string used // as the key. +// It is possible to provide default values for subsections in the section +// "default-" (or by setting values in the corresponding struct +// field "Default_"). // // The functions in this package panic if config is not a pointer to a struct, // or when a field is not of a suitable type (either a struct or a map with @@ -95,6 +98,30 @@ // The types subpackage for provides helpers for parsing "enum-like" and integer // types. // +// Error handling +// +// There are 3 types of errors: +// +// - programmer errors / panics: +// - invalid configuration structure +// - data errors: +// - fatal errors: +// - invalid configuration syntax +// - warnings: +// - data that doesn't belong to any part of the config structure +// +// Programmer errors trigger panics. These are should be fixed by the programmer +// before releasing code that uses gcfg. +// +// Data errors cause gcfg to return a non-nil error value. This includes the +// case when there are extra unknown key-value definitions in the configuration +// data (extra data). +// However, in some occasions it is desirable to be able to proceed in +// situations when the only data error is that of extra data. 
+// These errors are handled at a different (warning) priority and can be +// filtered out programmatically. To ignore extra data warnings, wrap the +// gcfg.Read*Into invocation into a call to gcfg.FatalOnly. +// // TODO // // The following is a list of changes under consideration: diff --git a/vendor/gopkg.in/gcfg.v1/errors.go b/vendor/gopkg.in/gcfg.v1/errors.go new file mode 100644 index 000000000000..853c76021de4 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/errors.go @@ -0,0 +1,41 @@ +package gcfg + +import ( + "gopkg.in/warnings.v0" +) + +// FatalOnly filters the results of a Read*Into invocation and returns only +// fatal errors. That is, errors (warnings) indicating data for unknown +// sections / variables is ignored. Example invocation: +// +// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) +// if err != nil { +// ... +// +func FatalOnly(err error) error { + return warnings.FatalOnly(err) +} + +func isFatal(err error) bool { + _, ok := err.(extraData) + return !ok +} + +type extraData struct { + section string + subsection *string + variable *string +} + +func (e extraData) Error() string { + s := "can't store data at section \"" + e.section + "\"" + if e.subsection != nil { + s += ", subsection \"" + *e.subsection + "\"" + } + if e.variable != nil { + s += ", variable \"" + *e.variable + "\"" + } + return s +} + +var _ error = extraData{} diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go index fdfb5f3a2c8f..8400cf124e92 100644 --- a/vendor/gopkg.in/gcfg.v1/read.go +++ b/vendor/gopkg.in/gcfg.v1/read.go @@ -6,11 +6,10 @@ import ( "io/ioutil" "os" "strings" -) -import ( "gopkg.in/gcfg.v1/scanner" "gopkg.in/gcfg.v1/token" + "gopkg.in/warnings.v0" ) var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'} @@ -49,7 +48,9 @@ func unquote(s string) string { return string(u) } -func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error { +func readIntoPass(c 
*warnings.Collector, config interface{}, fset *token.FileSet, + file *token.File, src []byte, subsectPass bool) error { + // var s scanner.Scanner var errs scanner.ErrorList s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) @@ -60,7 +61,9 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File, src []b } for { if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } switch tok { case token.EOF: @@ -70,46 +73,64 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File, src []b case token.LBRACK: pos, tok, lit = s.Scan() if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } if tok != token.IDENT { - return errfn("expected section name") + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } } sect, sectsub = lit, "" pos, tok, lit = s.Scan() if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } if tok == token.STRING { sectsub = unquote(lit) if sectsub == "" { - return errfn("empty subsection name") + if err := c.Collect(errfn("empty subsection name")); err != nil { + return err + } } pos, tok, lit = s.Scan() if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } } if tok != token.RBRACK { if sectsub == "" { - return errfn("expected subsection name or right bracket") + if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected right bracket")); err != nil { + return err } - return errfn("expected right bracket") } pos, tok, lit = s.Scan() if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - return errfn("expected EOL, EOF, or comment") + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } } // If a section/subsection header was found, 
ensure a // container object is created, even if there are no // variables further down. - err := set(config, sect, sectsub, "", true, "") + err := c.Collect(set(c, config, sect, sectsub, "", true, "", subsectPass)) if err != nil { return err } case token.IDENT: if sect == "" { - return errfn("expected section header") + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } } n := lit pos, tok, lit = s.Scan() @@ -119,38 +140,67 @@ func readInto(config interface{}, fset *token.FileSet, file *token.File, src []b blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" if !blank { if tok != token.ASSIGN { - return errfn("expected '='") + if err := c.Collect(errfn("expected '='")); err != nil { + return err + } } pos, tok, lit = s.Scan() if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } if tok != token.STRING { - return errfn("expected value") + if err := c.Collect(errfn("expected value")); err != nil { + return err + } } v = unquote(lit) pos, tok, lit = s.Scan() if errs.Len() > 0 { - return errs.Err() + if err := c.Collect(errs.Err()); err != nil { + return err + } } if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - return errfn("expected EOL, EOF, or comment") + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } } } - err := set(config, sect, sectsub, n, blank, v) + err := set(c, config, sect, sectsub, n, blank, v, subsectPass) if err != nil { return err } default: if sect == "" { - return errfn("expected section header") + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { + return err } - return errfn("expected section header or variable declaration") } } panic("never reached") } +func readInto(config interface{}, fset *token.FileSet, file *token.File, + src []byte) error 
{ + // + c := warnings.NewCollector(isFatal) + err := readIntoPass(c, config, fset, file, src, false) + if err != nil { + return err + } + err = readIntoPass(c, config, fset, file, src, true) + if err != nil { + return err + } + return c.Done() +} + // ReadInto reads gcfg formatted data from reader and sets the values into the // corresponding fields in config. func ReadInto(config interface{}, reader io.Reader) error { diff --git a/vendor/gopkg.in/gcfg.v1/read_test.go b/vendor/gopkg.in/gcfg.v1/read_test.go index 404d04dd9d73..3542a98aae55 100644 --- a/vendor/gopkg.in/gcfg.v1/read_test.go +++ b/vendor/gopkg.in/gcfg.v1/read_test.go @@ -319,7 +319,7 @@ func TestReadFileInto(t *testing.T) { res := &struct{ Section struct{ Name string } }{} err := ReadFileInto(res, "testdata/gcfg_test.gcfg") if err != nil { - t.Errorf(err.Error()) + t.Error(err) } if "value" != res.Section.Name { t.Errorf("got %q, wanted %q", res.Section.Name, "value") @@ -330,9 +330,49 @@ func TestReadFileIntoUnicode(t *testing.T) { res := &struct{ X甲 struct{ X乙 string } }{} err := ReadFileInto(res, "testdata/gcfg_unicode_test.gcfg") if err != nil { - t.Errorf(err.Error()) + t.Error(err) } if "丙" != res.X甲.X乙 { t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙") } } + +func TestReadStringIntoSubsectDefaults(t *testing.T) { + type subsect struct { + Color string + Orientation string + } + res := &struct { + Default_Profile subsect + Profile map[string]*subsect + }{Default_Profile: subsect{Color: "green"}} + cfg := ` + [profile "one"] + orientation = left` + err := ReadStringInto(res, cfg) + if err != nil { + t.Error(err) + } + if res.Profile["one"].Color != "green" { + t.Errorf("got %q; want %q", res.Profile["one"].Color, "green") + } +} + +func TestReadStringIntoExtraData(t *testing.T) { + res := &struct { + Section struct { + Name string + } + }{} + cfg := ` + [section] + name = value + name2 = value2` + err := FatalOnly(ReadStringInto(res, cfg)) + if err != nil { + t.Error(err) + } + if res.Section.Name 
!= "value" { + t.Errorf("res.Section.Name=%q; want %q", res.Section.Name, "value") + } +} diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go index 7252b6894653..e85ec155dcaf 100644 --- a/vendor/gopkg.in/gcfg.v1/set.go +++ b/vendor/gopkg.in/gcfg.v1/set.go @@ -1,6 +1,8 @@ package gcfg import ( + "bytes" + "encoding/gob" "fmt" "math/big" "reflect" @@ -9,6 +11,7 @@ import ( "unicode/utf8" "gopkg.in/gcfg.v1/types" + "gopkg.in/warnings.v0" ) type tag struct { @@ -189,7 +192,30 @@ func scanSetter(d interface{}, blank bool, val string, tt tag) error { return types.ScanFully(d, val, 'v') } -func set(cfg interface{}, sect, sub, name string, blank bool, value string) error { +func newValue(sect string, vCfg reflect.Value, vType reflect.Type) (reflect.Value, error) { + pv := reflect.New(vType) + dfltName := "default-" + sect + dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + err = ge.EncodeValue(dfltField) + if err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + err = gd.DecodeValue(pv.Elem()) + if err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + blank bool, value string, subsectPass bool) error { + // vPCfg := reflect.ValueOf(cfg) if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { panic(fmt.Errorf("config must be a pointer to a struct")) @@ -197,9 +223,14 @@ func set(cfg interface{}, sect, sub, name string, blank bool, value string) erro vCfg := vPCfg.Elem() vSect, _ := fieldFold(vCfg, sect) if !vSect.IsValid() { - return fmt.Errorf("invalid section: section %q", sect) + err := extraData{section: sect} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil } - if vSect.Kind() == reflect.Map { + if isSubsect { vst := vSect.Type() if vst.Key().Kind() != 
reflect.String || vst.Elem().Kind() != reflect.Ptr || @@ -214,7 +245,11 @@ func set(cfg interface{}, sect, sub, name string, blank bool, value string) erro pv := vSect.MapIndex(k) if !pv.IsValid() { vType := vSect.Type().Elem().Elem() - pv = reflect.New(vType) + var err error + pv, err = newValue(sect, vCfg, vType) + if err != nil { + return err + } vSect.SetMapIndex(k, pv) } vSect = pv.Elem() @@ -222,8 +257,8 @@ func set(cfg interface{}, sect, sub, name string, blank bool, value string) erro panic(fmt.Errorf("field for section must be a map or a struct: "+ "section %q", sect)) } else if sub != "" { - return fmt.Errorf("invalid subsection: "+ - "section %q subsection %q", sect, sub) + err := extraData{section: sect, subsection: &sub} + return c.Collect(err) } // Empty name is a special value, meaning that only the // section/subsection object is to be created, with no values set. @@ -232,8 +267,13 @@ func set(cfg interface{}, sect, sub, name string, blank bool, value string) erro } vVar, t := fieldFold(vSect, name) if !vVar.IsValid() { - return fmt.Errorf("invalid variable: "+ - "section %q subsection %q variable %q", sect, sub, name) + var err error + if isSubsect { + err = extraData{section: sect, subsection: &sub, variable: &name} + } else { + err = extraData{section: sect, variable: &name} + } + return c.Collect(err) } // vVal is either single-valued var, or newly allocated value within multi-valued var var vVal reflect.Value diff --git a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE new file mode 100644 index 000000000000..d65f7e9d8cd6 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Péter Surányi. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README new file mode 100644 index 000000000000..9922b75655d2 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/README @@ -0,0 +1,74 @@ +Package warnings implements error handling with non-fatal errors (warnings). + +import path: "gopkg.in/warnings.v0" +package docs: https://godoc.org/gopkg.in/warnings.v0 +issues: https://github.com/go-warnings/warnings/issues +pull requests: https://github.com/go-warnings/warnings/pulls + +A recurring pattern in Go programming is the following: + + func myfunc(params) error { + if err := doSomething(...); err != nil { + return err + } + if err := doSomethingElse(...); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + return errors.New("my error") + } + ... + return nil + } + +This pattern allows interrupting the flow on any received error. But what if +there are errors that should be noted but still not fatal, for which the flow +should not be interrupted? 
Implementing such logic at each if statement would +make the code complex and the flow much harder to follow. + +Package warnings provides the Collector type and a clean and simple pattern +for achieving such logic. The Collector takes care of deciding when to break +the flow and when to continue, collecting any non-fatal errors (warnings) +along the way. The only requirement is that fatal and non-fatal errors can be +distinguished programmatically; that is a function such as + + IsFatal(error) bool + +must be implemented. The following is an example of what the above snippet +could look like using the warnings package: + + import "gopkg.in/warnings.v0" + + func isFatal(err error) bool { + _, ok := err.(WarningType) + return !ok + } + + func myfunc(params) error { + c := warnings.NewCollector(isFatal) + c.FatalWithWarnings = true + if err := c.Collect(doSomething()); err != nil { + return err + } + if err := c.Collect(doSomethingElse(...)); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + if err := c.Collect(errors.New("my error")); err != nil { + return err + } + } + ... + return c.Done() + } + +Rules for using warnings + + - ensure that warnings are programmatically distinguishable from fatal + errors (i.e. implement an isFatal function and any necessary error types) + - ensure that there is a single Collector instance for a call of each + exported function + - ensure that all errors (fatal or warning) are fed through Collect + - ensure that every time an error is returned, it is one returned by a + Collector (from Collect or Done) + - ensure that Collect is never called after Done diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go new file mode 100644 index 000000000000..0e678004849a --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/warnings.go @@ -0,0 +1,191 @@ +// Package warnings implements error handling with non-fatal errors (warnings). 
+// +// A recurring pattern in Go programming is the following: +// +// func myfunc(params) error { +// if err := doSomething(...); err != nil { +// return err +// } +// if err := doSomethingElse(...); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// return errors.New("my error") +// } +// ... +// return nil +// } +// +// This pattern allows interrupting the flow on any received error. But what if +// there are errors that should be noted but still not fatal, for which the flow +// should not be interrupted? Implementing such logic at each if statement would +// make the code complex and the flow much harder to follow. +// +// Package warnings provides the Collector type and a clean and simple pattern +// for achieving such logic. The Collector takes care of deciding when to break +// the flow and when to continue, collecting any non-fatal errors (warnings) +// along the way. The only requirement is that fatal and non-fatal errors can be +// distinguished programmatically; that is a function such as +// +// IsFatal(error) bool +// +// must be implemented. The following is an example of what the above snippet +// could look like using the warnings package: +// +// import "gopkg.in/warnings.v0" +// +// func isFatal(err error) bool { +// _, ok := err.(WarningType) +// return !ok +// } +// +// func myfunc(params) error { +// c := warnings.NewCollector(isFatal) +// c.FatalWithWarnings = true +// if err := c.Collect(doSomething()); err != nil { +// return err +// } +// if err := c.Collect(doSomethingElse(...)); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// if err := c.Collect(errors.New("my error")); err != nil { +// return err +// } +// } +// ... +// return c.Done() +// } +// +// Rules for using warnings +// +// - ensure that warnings are programmatically distinguishable from fatal +// errors (i.e. 
implement an isFatal function and any necessary error types) +// - ensure that there is a single Collector instance for a call of each +// exported function +// - ensure that all errors (fatal or warning) are fed through Collect +// - ensure that every time an error is returned, it is one returned by a +// Collector (from Collect or Done) +// - ensure that Collect is never called after Done +// +// TODO +// +// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?) +// - consider interaction with contexts +// - go vet-style invocations verifier +// - semi-automatic code converter +// +package warnings + +import ( + "bytes" + "fmt" +) + +// List holds a collection of warnings and optionally one fatal error. +type List struct { + Warnings []error + Fatal error +} + +// Error implements the error interface. +func (l List) Error() string { + b := bytes.NewBuffer(nil) + if l.Fatal != nil { + fmt.Fprintln(b, "fatal:") + fmt.Fprintln(b, l.Fatal) + } + switch len(l.Warnings) { + case 0: + // nop + case 1: + fmt.Fprintln(b, "warning:") + default: + fmt.Fprintln(b, "warnings:") + } + for _, err := range l.Warnings { + fmt.Fprintln(b, err) + } + return b.String() +} + +// A Collector collects errors up to the first fatal error. +type Collector struct { + // IsFatal distinguishes between warnings and fatal errors. + IsFatal func(error) bool + // FatalWithWarnings set to true means that a fatal error is returned as + // a List together with all warnings so far. The default behavior is to + // only return the fatal error and discard any warnings that have been + // collected. + FatalWithWarnings bool + + l List + done bool +} + +// NewCollector returns a new Collector; it uses isFatal to distinguish between +// warnings and fatal errors. +func NewCollector(isFatal func(error) bool) *Collector { + return &Collector{IsFatal: isFatal} +} + +// Collect collects a single error (warning or fatal). 
It returns nil if +// collection can continue (only warnings so far), or otherwise the errors +// collected. Collect mustn't be called after the first fatal error or after +// Done has been called. +func (c *Collector) Collect(err error) error { + if c.done { + panic("warnings.Collector already done") + } + if err == nil { + return nil + } + if c.IsFatal(err) { + c.done = true + c.l.Fatal = err + } else { + c.l.Warnings = append(c.l.Warnings, err) + } + if c.l.Fatal != nil { + return c.erorr() + } + return nil +} + +// Done ends collection and returns the collected error(s). +func (c *Collector) Done() error { + c.done = true + return c.erorr() +} + +func (c *Collector) erorr() error { + if !c.FatalWithWarnings && c.l.Fatal != nil { + return c.l.Fatal + } + if c.l.Fatal == nil && len(c.l.Warnings) == 0 { + return nil + } + // Note that a single warning is also returned as a List. This is to make it + // easier to determine fatal-ness of the returned error. + return c.l +} + +// FatalOnly returns the fatal error, if any, **in an error returned by a +// Collector**. It returns nil if and only if err is nil or err is a List +// with err.Fatal == nil. +func FatalOnly(err error) error { + l, ok := err.(List) + if !ok { + return err + } + return l.Fatal +} + +// WarningsOnly returns the warnings **in an error returned by a Collector**. 
+func WarningsOnly(err error) []error { + l, ok := err.(List) + if !ok { + return nil + } + return l.Warnings +} diff --git a/vendor/gopkg.in/warnings.v0/warnings_test.go b/vendor/gopkg.in/warnings.v0/warnings_test.go new file mode 100644 index 000000000000..8d6ad0d6748d --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/warnings_test.go @@ -0,0 +1,82 @@ +package warnings_test + +import ( + "errors" + "reflect" + "testing" + + w "gopkg.in/warnings.v0" +) + +var _ error = List{} + +type warn string + +func (w warn) Error() string { return string(w) } + +func warning(s string) error { return warn(s) } +func fatal(s string) error { return errors.New(s) } + +func isFatal(err error) bool { + _, ok := err.(warn) + return !ok +} + +func omitNils(errs []error) []error { + if errs == nil { + return nil + } + res := []error{} + for _, err := range errs { + if err != nil { + res = append(res, err) + } + } + return res +} + +var collectorTests = [...]struct { + collector w.Collector + warnings []error + fatal error +}{ + {w.Collector{IsFatal: isFatal}, nil, nil}, + {w.Collector{IsFatal: isFatal}, nil, fatal("1f")}, + {w.Collector{IsFatal: isFatal}, []error{warning("1w")}, nil}, + {w.Collector{IsFatal: isFatal}, []error{warning("1w")}, fatal("2f")}, + {w.Collector{IsFatal: isFatal}, []error{warning("1w"), warning("2w")}, fatal("3f")}, + {w.Collector{IsFatal: isFatal}, []error{warning("1w"), nil, warning("2w")}, fatal("3f")}, + {w.Collector{IsFatal: isFatal, FatalWithWarnings: true}, []error{warning("1w")}, fatal("2f")}, +} + +func TestCollector(t *testing.T) { + for _, tt := range collectorTests { + c := tt.collector + for _, warn := range tt.warnings { + err := c.Collect(warn) + if err != nil { + t.Fatalf("Collect(%v) = %v; want nil", warn, err) + } + } + if tt.fatal != nil { + err := c.Collect(tt.fatal) + if err == nil || w.FatalOnly(err) != tt.fatal { + t.Fatalf("Collect(%v) = %v; want fatal %v", tt.fatal, + err, tt.fatal) + } + } + err := c.Done() + if tt.fatal != nil { + if 
err == nil || w.FatalOnly(err) != tt.fatal { + t.Fatalf("Done() = %v; want fatal %v", err, tt.fatal) + } + } + if tt.fatal == nil || c.FatalWithWarnings { + warns := w.WarningsOnly(err) + if !reflect.DeepEqual(warns, omitNils(tt.warnings)) { + t.Fatalf("Done() = %v; want warnings %v", err, + omitNils(tt.warnings)) + } + } + } +} diff --git a/vendor/k8s.io/kubernetes/README.md b/vendor/k8s.io/kubernetes/README.md index e859cd7a1182..697f2c4dcedf 100644 --- a/vendor/k8s.io/kubernetes/README.md +++ b/vendor/k8s.io/kubernetes/README.md @@ -81,6 +81,6 @@ That said, if you have questions, reach out to us [Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615 [Submit Queue]: http://submit-queue.k8s.io/#/e2e [Submit Queue Widget]: http://submit-queue.k8s.io/health.svg?v=1 -[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/ +[troubleshooting guide]: https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/ [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json b/vendor/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json index 69eb5c27eb66..6b2ac61d94e3 100644 --- a/vendor/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json +++ b/vendor/k8s.io/kubernetes/api/swagger-spec/apps_v1beta1.json @@ -1737,8 +1737,8 @@ { "type": "v1beta1.DeploymentRollback", "method": "POST", - "summary": "create rollback of a DeploymentRollback", - "nickname": "createNamespacedDeploymentRollbackRollback", + "summary": "create rollback of a Deployment", + "nickname": "createNamespacedDeploymentRollback", "parameters": [ { "type": "string", @@ -1798,8 +1798,8 @@ { "type": "v1beta1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified Deployment", + 
"nickname": "readNamespacedDeploymentScale", "parameters": [ { "type": "string", @@ -1845,8 +1845,8 @@ { "type": "v1beta1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified Deployment", + "nickname": "replaceNamespacedDeploymentScale", "parameters": [ { "type": "string", @@ -1900,8 +1900,8 @@ { "type": "v1beta1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified Deployment", + "nickname": "patchNamespacedDeploymentScale", "parameters": [ { "type": "string", diff --git a/vendor/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json b/vendor/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json index 0739c0d082f4..1e441fb3d4c4 100644 --- a/vendor/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json +++ b/vendor/k8s.io/kubernetes/api/swagger-spec/extensions_v1beta1.json @@ -1902,8 +1902,8 @@ { "type": "v1beta1.DeploymentRollback", "method": "POST", - "summary": "create rollback of a DeploymentRollback", - "nickname": "createNamespacedDeploymentRollbackRollback", + "summary": "create rollback of a Deployment", + "nickname": "createNamespacedDeploymentRollback", "parameters": [ { "type": "string", @@ -1963,8 +1963,8 @@ { "type": "v1beta1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified Deployment", + "nickname": "readNamespacedDeploymentScale", "parameters": [ { "type": "string", @@ -2010,8 +2010,8 @@ { "type": "v1beta1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified Deployment", + "nickname": "replaceNamespacedDeploymentScale", "parameters": [ { "type": "string", @@ -2065,8 +2065,8 
@@ { "type": "v1beta1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified Deployment", + "nickname": "patchNamespacedDeploymentScale", "parameters": [ { "type": "string", @@ -5656,8 +5656,8 @@ { "type": "v1beta1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified ReplicaSet", + "nickname": "readNamespacedReplicaSetScale", "parameters": [ { "type": "string", @@ -5703,8 +5703,8 @@ { "type": "v1beta1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified ReplicaSet", + "nickname": "replaceNamespacedReplicaSetScale", "parameters": [ { "type": "string", @@ -5758,8 +5758,8 @@ { "type": "v1beta1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified ReplicaSet", + "nickname": "patchNamespacedReplicaSetScale", "parameters": [ { "type": "string", @@ -5986,8 +5986,8 @@ { "type": "v1beta1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified ReplicationControllerDummy", + "nickname": "readNamespacedReplicationControllerDummyScale", "parameters": [ { "type": "string", @@ -6033,8 +6033,8 @@ { "type": "v1beta1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified ReplicationControllerDummy", + "nickname": "replaceNamespacedReplicationControllerDummyScale", "parameters": [ { "type": "string", @@ -6088,8 +6088,8 @@ { "type": "v1beta1.Scale", "method": "PATCH", - "summary": "partially update 
scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified ReplicationControllerDummy", + "nickname": "patchNamespacedReplicationControllerDummyScale", "parameters": [ { "type": "string", diff --git a/vendor/k8s.io/kubernetes/api/swagger-spec/storage.authorization.k8s.io_v1beta1.json b/vendor/k8s.io/kubernetes/api/swagger-spec/storage.authorization.k8s.io_v1beta1.json index 8b137891791f..e69de29bb2d1 100644 --- a/vendor/k8s.io/kubernetes/api/swagger-spec/storage.authorization.k8s.io_v1beta1.json +++ b/vendor/k8s.io/kubernetes/api/swagger-spec/storage.authorization.k8s.io_v1beta1.json @@ -1 +0,0 @@ - diff --git a/vendor/k8s.io/kubernetes/api/swagger-spec/v1.json b/vendor/k8s.io/kubernetes/api/swagger-spec/v1.json index d9c48398667b..077b1214033a 100644 --- a/vendor/k8s.io/kubernetes/api/swagger-spec/v1.json +++ b/vendor/k8s.io/kubernetes/api/swagger-spec/v1.json @@ -9271,8 +9271,8 @@ { "type": "v1.Binding", "method": "POST", - "summary": "create binding of a Binding", - "nickname": "createNamespacedBindingBinding", + "summary": "create binding of a Pod", + "nickname": "createNamespacedPodBinding", "parameters": [ { "type": "string", @@ -9332,8 +9332,8 @@ { "type": "v1beta1.Eviction", "method": "POST", - "summary": "create eviction of an Eviction", - "nickname": "createNamespacedEvictionEviction", + "summary": "create eviction of a Pod", + "nickname": "createNamespacedPodEviction", "parameters": [ { "type": "string", @@ -12234,8 +12234,8 @@ { "type": "v1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified ReplicationController", + "nickname": "readNamespacedReplicationControllerScale", "parameters": [ { "type": "string", @@ -12281,8 +12281,8 @@ { "type": "v1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": 
"replace scale of the specified ReplicationController", + "nickname": "replaceNamespacedReplicationControllerScale", "parameters": [ { "type": "string", @@ -12336,8 +12336,8 @@ { "type": "v1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified ReplicationController", + "nickname": "patchNamespacedReplicationControllerScale", "parameters": [ { "type": "string", diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD index f07d4759fa5d..93faf6520646 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/BUILD @@ -81,7 +81,6 @@ go_library( "//vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion:go_default_library", "//vendor/k8s.io/apiextensions-apiserver/pkg/cmd/server:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", @@ -100,10 +99,12 @@ go_library( "//vendor/k8s.io/apiserver/pkg/server/storage:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/tools/cache:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apiserver:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion:go_default_library", + 
"//vendor/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/controllers/autoregister:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go index e146d98c9b99..82cc4ef1835a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go @@ -24,21 +24,24 @@ import ( "io/ioutil" "net/http" "strings" + "sync" "github.com/golang/glog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/healthz" genericoptions "k8s.io/apiserver/pkg/server/options" kubeexternalinformers "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" "k8s.io/kube-aggregator/pkg/apis/apiregistration" "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion" + apiregistrationinformers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion" "k8s.io/kube-aggregator/pkg/controllers/autoregister" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -104,39 +107,23 @@ func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delega autoRegistrationController) aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error { - go 
autoRegistrationController.Run(5, context.StopCh) go tprRegistrationController.Run(5, context.StopCh) + go func() { + // let the CRD controller process the initial set of CRDs before starting the autoregistration controller. + // this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist. + tprRegistrationController.WaitForInitialSync() + autoRegistrationController.Run(5, context.StopCh) + }() return nil }) - aggregatorServer.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("autoregister-completion", func(r *http.Request) error { - items, err := aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices().Lister().List(labels.Everything()) - if err != nil { - return err - } - missing := []apiregistration.APIService{} - for _, apiService := range apiServices { - found := false - for _, item := range items { - if item.Name != apiService.Name { - continue - } - if apiregistration.IsAPIServiceConditionTrue(item, apiregistration.Available) { - found = true - break - } - } - - if !found { - missing = append(missing, *apiService) - } - } - - if len(missing) > 0 { - return fmt.Errorf("missing APIService: %v", missing) - } - return nil - })) + aggregatorServer.GenericAPIServer.AddHealthzChecks( + makeAPIServiceAvailableHealthzCheck( + "autoregister-completion", + apiServices, + aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), + ), + ) return aggregatorServer, nil } @@ -160,6 +147,45 @@ func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { } } +// makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy +// once all of the specified services have been observed to be available at least once. 
+func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer apiregistrationinformers.APIServiceInformer) healthz.HealthzChecker { + // Track the auto-registered API services that have not been observed to be available yet + pendingServiceNamesLock := &sync.RWMutex{} + pendingServiceNames := sets.NewString() + for _, service := range apiServices { + pendingServiceNames.Insert(service.Name) + } + + // When an APIService in the list is seen as available, remove it from the pending list + handleAPIServiceChange := func(service *apiregistration.APIService) { + pendingServiceNamesLock.Lock() + defer pendingServiceNamesLock.Unlock() + if !pendingServiceNames.Has(service.Name) { + return + } + if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) { + pendingServiceNames.Delete(service.Name) + } + } + + // Watch add/update events for APIServices + apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) }, + UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) }, + }) + + // Don't return healthy until the pending list is empty + return healthz.NamedCheck(name, func(r *http.Request) error { + pendingServiceNamesLock.RLock() + defer pendingServiceNamesLock.RUnlock() + if pendingServiceNames.Len() > 0 { + return fmt.Errorf("missing APIService: %v", pendingServiceNames.List()) + } + return nil + }) +} + type priority struct { group int32 version int32 @@ -202,7 +228,7 @@ func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, for _, curr := range delegateAPIServer.ListedPaths() { if curr == "/api/v1" { apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"}) - registration.AddAPIServiceToSync(apiService) + registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, 
apiService) continue } @@ -220,7 +246,7 @@ func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, if apiService == nil { continue } - registration.AddAPIServiceToSync(apiService) + registration.AddAPIServiceToSyncOnStart(apiService) apiServices = append(apiServices, apiService) } diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options_test.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options_test.go index 06eee371d50a..a9aa37bb6e42 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options_test.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/options/options_test.go @@ -34,6 +34,7 @@ func TestAddFlagsFlag(t *testing.T) { args := []string{ "--enable-swagger-ui=true", + "--request-timeout=2m", } f.Parse(args) if !s.Features.EnableSwaggerUI { diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index bf3d3716ee8d..a08120464f0d 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -50,7 +50,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" - serveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/server/options/encryptionconfig" serverstorage "k8s.io/apiserver/pkg/server/storage" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" @@ -638,16 +637,8 @@ func defaultOptions(s *options.ServerRunOptions) error { } if s.Etcd.EnableWatchCache { glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) - sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) - if userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil { - for resource, size := range userSpecified { - sizes[resource] = size - } 
- } - s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes) - if err != nil { - return err - } + cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) + cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) } return nil diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD index 65ab9ca32104..80f9bfa3a871 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubelet/qos:go_default_library", + "//pkg/master/ports:go_default_library", "//pkg/proxy:go_default_library", "//pkg/proxy/config:go_default_library", "//pkg/proxy/healthcheck:go_default_library", diff --git a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go index d8f69fcfd543..2a0d88cc7a5a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-proxy/app/server.go @@ -53,6 +53,7 @@ import ( informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubelet/qos" + "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/proxy" proxyconfig "k8s.io/kubernetes/pkg/proxy/config" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -129,6 +130,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.Int32Var(&options.healthzPort, "healthz-port", options.healthzPort, "The port to bind the health check server. 
Use 0 to disable.") fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to 0.0.0.0 for all interfaces)") + fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all interfaces)") fs.Int32Var(options.config.OOMScoreAdj, "oom-score-adj", util.Int32PtrDerefOr(options.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]") fs.StringVar(&options.config.ResourceContainer, "resource-container", options.config.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.") @@ -166,7 +168,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) { func NewOptions() (*Options, error) { o := &Options{ config: new(componentconfig.KubeProxyConfiguration), - healthzPort: 10256, + healthzPort: ports.ProxyHealthzPort, } o.scheme = runtime.NewScheme() @@ -461,8 +463,10 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx recorder := eventBroadcaster.NewRecorder(scheme, clientv1.EventSource{Component: "kube-proxy", Host: hostname}) var healthzServer *healthcheck.HealthzServer + var healthzUpdater healthcheck.HealthzUpdater if len(config.HealthzBindAddress) > 0 { healthzServer = healthcheck.NewDefaultHealthzServer(config.HealthzBindAddress, 2*config.IPTables.SyncPeriod.Duration) + healthzUpdater = healthzServer } var proxier proxy.ProxyProvider @@ -496,7 +500,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx hostname, nodeIP, recorder, - healthzServer, + healthzUpdater, ) if err != nil { return nil, fmt.Errorf("unable to create proxier: %v", 
err) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go index d5885fbd6bbf..04125b6cd682 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go @@ -37,6 +37,7 @@ type MasterConfiguration struct { Networking Networking KubernetesVersion string CloudProvider string + NodeName string AuthorizationModes []string Token string @@ -93,6 +94,7 @@ type NodeConfiguration struct { DiscoveryToken string // Currently we only pay attention to one api server but hope to support >1 in the future DiscoveryTokenAPIServers []string + NodeName string TLSBootstrapToken string Token string } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go index b555cd3fa360..1a683ef1029a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go @@ -30,6 +30,7 @@ type MasterConfiguration struct { Networking Networking `json:"networking"` KubernetesVersion string `json:"kubernetesVersion"` CloudProvider string `json:"cloudProvider"` + NodeName string `json:"nodeName"` AuthorizationModes []string `json:"authorizationModes"` Token string `json:"token"` @@ -85,6 +86,7 @@ type NodeConfiguration struct { DiscoveryFile string `json:"discoveryFile"` DiscoveryToken string `json:"discoveryToken"` DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers"` + NodeName string `json:"nodeName"` TLSBootstrapToken string `json:"tlsBootstrapToken"` Token string `json:"token"` } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation/validation.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation/validation.go index e338c4aefcfc..850159f12a8a 100644 --- 
a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation/validation.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation/validation.go @@ -24,6 +24,8 @@ import ( "path/filepath" "strings" + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -32,6 +34,7 @@ import ( apivalidation "k8s.io/kubernetes/pkg/api/validation" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" + "k8s.io/kubernetes/pkg/util/node" ) // TODO: Break out the cloudprovider functionality out of core and only support the new flow @@ -56,6 +59,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...) allErrs = append(allErrs, ValidateAPIServerCertSANs(c.APIServerCertSANs, field.NewPath("cert-altnames"))...) allErrs = append(allErrs, ValidateAbsolutePath(c.CertificatesDir, field.NewPath("certificates-dir"))...) + allErrs = append(allErrs, ValidateNodeName(c.NodeName, field.NewPath("node-name"))...) allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...) 
return allErrs } @@ -226,6 +230,14 @@ func ValidateAbsolutePath(path string, fldPath *field.Path) field.ErrorList { return allErrs } +func ValidateNodeName(nodename string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if node.GetHostname(nodename) != nodename { + allErrs = append(allErrs, field.Invalid(fldPath, nodename, "nodename is not valid")) + } + return allErrs +} + func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(provider) == 0 { @@ -239,3 +251,24 @@ func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList allErrs = append(allErrs, field.Invalid(fldPath, provider, "cloudprovider not supported")) return allErrs } + +func ValidateMixedArguments(flag *pflag.FlagSet) error { + // If --config isn't set, we have nothing to validate + if !flag.Changed("config") { + return nil + } + + mixedInvalidFlags := []string{} + flag.Visit(func(f *pflag.Flag) { + if f.Name == "config" || strings.HasPrefix(f.Name, "skip-") { + // "--skip-*" flags can be set with --config + return + } + mixedInvalidFlags = append(mixedInvalidFlags, f.Name) + }) + + if len(mixedInvalidFlags) != 0 { + return fmt.Errorf("can not mix '--config' with arguments %v", mixedInvalidFlags) + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go index f9f8d3aac723..c7e2f74d4556 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go @@ -27,6 +27,7 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" tokenutil "k8s.io/kubernetes/cmd/kubeadm/app/util/token" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" + "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/version" ) @@ -77,6 +78,9 @@ func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error { } } + // Use cfg.NodeName 
if set, otherwise get that from os.Hostname(). This also makes sure the hostname is lower-cased + cfg.NodeName = node.GetHostname(cfg.NodeName) + return nil } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go index 7b750984a3e4..3ae5ad392810 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go @@ -84,7 +84,13 @@ func NewCmdInit(out io.Writer) *cobra.Command { i, err := NewInit(cfgPath, internalcfg, skipPreFlight, skipTokenPrint) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(i.Validate()) + kubeadmutil.CheckErr(i.Validate(cmd)) + + // TODO: remove this warning in 1.9 + if !cmd.Flags().Lookup("token-ttl").Changed { + fmt.Println("[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)") + } + kubeadmutil.CheckErr(i.Run(out)) }, } @@ -121,13 +127,19 @@ func NewCmdInit(out io.Writer) *cobra.Command { &cfg.APIServerCertSANs, "apiserver-cert-extra-sans", cfg.APIServerCertSANs, `Optional extra altnames to use for the API Server serving cert. 
Can be both IP addresses and dns names.`, ) + cmd.PersistentFlags().StringVar( + &cfg.NodeName, "node-name", cfg.NodeName, + `Specify the node name`, + ) cmd.PersistentFlags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)") + // Note: All flags that are not bound to the cfg object should be whitelisted in cmd/kubeadm/app/apis/kubeadm/validation/validation.go cmd.PersistentFlags().BoolVar( &skipPreFlight, "skip-preflight-checks", skipPreFlight, "Skip preflight checks normally run before modifying the system", ) + // Note: All flags that are not bound to the cfg object should be whitelisted in cmd/kubeadm/app/apis/kubeadm/validation/validation.go cmd.PersistentFlags().BoolVar( &skipTokenPrint, "skip-token-print", skipTokenPrint, "Skip printing of the default bootstrap token generated by 'kubeadm init'", @@ -192,7 +204,10 @@ type Init struct { } // Validate validates configuration passed to "kubeadm init" -func (i *Init) Validate() error { +func (i *Init) Validate(cmd *cobra.Command) error { + if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil { + return err + } return validation.ValidateMasterConfiguration(i.cfg).ToAggregate() } @@ -208,7 +223,7 @@ func (i *Init) Run(out io.Writer) error { // PHASE 2: Generate kubeconfig files for the admin and the kubelet masterEndpoint := fmt.Sprintf("https://%s:%d", i.cfg.API.AdvertiseAddress, i.cfg.API.BindPort) - err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir) + err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir, i.cfg.NodeName) if err != nil { return err } @@ -224,7 +239,7 @@ func (i *Init) Run(out io.Writer) error { return err } - if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil { + if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client, 
i.cfg.NodeName); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go index 031b89c15e6e..ff32031f117c 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "io/ioutil" - "os" "path/filepath" "github.com/renstrom/dedent" @@ -33,11 +32,12 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/discovery" - kubenode "k8s.io/kubernetes/cmd/kubeadm/app/node" + kubeadmnode "k8s.io/kubernetes/cmd/kubeadm/app/node" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" "k8s.io/kubernetes/pkg/api" + nodeutil "k8s.io/kubernetes/pkg/util/node" ) var ( @@ -63,26 +63,26 @@ func NewCmdJoin(out io.Writer) *cobra.Command { Use: "join [DiscoveryTokenAPIServers]", Short: "Run this on any machine you wish to join an existing cluster", Long: dedent.Dedent(` - When joining a kubeadm initialized cluster, we need to establish - bidirectional trust. This is split into discovery (having the Node - trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes + When joining a kubeadm initialized cluster, we need to establish + bidirectional trust. This is split into discovery (having the Node + trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes Master trust the Node). - There are 2 main schemes for discovery. The first is to use a shared - token along with the IP address of the API server. The second is to - provide a file (a subset of the standard kubeconfig file). This file - can be a local file or downloaded via an HTTPS URL. 
The forms are - kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443, + There are 2 main schemes for discovery. The first is to use a shared + token along with the IP address of the API server. The second is to + provide a file (a subset of the standard kubeconfig file). This file + can be a local file or downloaded via an HTTPS URL. The forms are + kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443, kubeadm join --discovery-file path/to/file.conf, or kubeadm join - --discovery-file https://url/file.conf. Only one form can be used. If - the discovery information is loaded from a URL, HTTPS must be used and + --discovery-file https://url/file.conf. Only one form can be used. If + the discovery information is loaded from a URL, HTTPS must be used and the host installed CA bundle is used to verify the connection. - The TLS bootstrap mechanism is also driven via a shared token. This is + The TLS bootstrap mechanism is also driven via a shared token. This is used to temporarily authenticate with the Kubernetes Master to submit a - certificate signing request (CSR) for a locally created key pair. By - default kubeadm will set up the Kubernetes Master to automatically - approve these signing requests. This token is passed in with the + certificate signing request (CSR) for a locally created key pair. By + default kubeadm will set up the Kubernetes Master to automatically + approve these signing requests. This token is passed in with the --tls-bootstrap-token abcdef.1234567890abcdef flag. Often times the same token is used for both parts. 
In this case, the @@ -97,7 +97,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command { j, err := NewJoin(cfgPath, args, internalcfg, skipPreFlight) kubeadmutil.CheckErr(err) - kubeadmutil.CheckErr(j.Validate()) + kubeadmutil.CheckErr(j.Validate(cmd)) kubeadmutil.CheckErr(j.Run(out)) }, } @@ -112,6 +112,9 @@ func NewCmdJoin(out io.Writer) *cobra.Command { cmd.PersistentFlags().StringVar( &cfg.DiscoveryToken, "discovery-token", "", "A token used to validate cluster information fetched from the master") + cmd.PersistentFlags().StringVar( + &cfg.NodeName, "node-name", "", + "Specify the node name") cmd.PersistentFlags().StringVar( &cfg.TLSBootstrapToken, "tls-bootstrap-token", "", "A token used for TLS bootstrapping") @@ -134,6 +137,10 @@ type Join struct { func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, skipPreFlight bool) (*Join, error) { fmt.Println("[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.") + if cfg.NodeName == "" { + cfg.NodeName = nodeutil.GetHostname("") + } + if cfgPath != "" { b, err := ioutil.ReadFile(cfgPath) if err != nil { @@ -166,7 +173,10 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, s return &Join{cfg: cfg}, nil } -func (j *Join) Validate() error { +func (j *Join) Validate(cmd *cobra.Command) error { + if err := validation.ValidateMixedArguments(cmd.PersistentFlags()); err != nil { + return err + } return validation.ValidateNodeConfiguration(j.cfg).ToAggregate() } @@ -177,18 +187,17 @@ func (j *Join) Run(out io.Writer) error { return err } - hostname, err := os.Hostname() - if err != nil { - return err - } + // Use j.cfg.NodeName if set, otherwise get that from os.Hostname(). 
This also makes sure the hostname is lower-cased + hostname := nodeutil.GetHostname(j.cfg.NodeName) + client, err := kubeconfigutil.KubeConfigToClientSet(cfg) if err != nil { return err } - if err := kubenode.ValidateAPIServer(client); err != nil { + if err := kubeadmnode.ValidateAPIServer(client); err != nil { return err } - if err := kubenode.PerformTLSBootstrap(cfg, hostname); err != nil { + if err := kubeadmnode.PerformTLSBootstrap(cfg, hostname); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go index 1da7e6bb4692..6761c75c33ad 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go @@ -114,7 +114,7 @@ func (r *Reset) Run(out io.Writer) error { fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers") } - dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim"} + dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d", "/var/lib/dockershim", "/var/run/kubernetes"} // Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user // provided external etcd endpoints. 
In that case, it is his own responsibility to reset etcd diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/token.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/token.go index ff6fdb5b0284..1475885c2f42 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/token.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/token.go @@ -109,6 +109,12 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) kubeadmutil.CheckErr(err) + // TODO: remove this warning in 1.9 + if !tokenCmd.Flags().Lookup("ttl").Changed { + // sending this output to stderr s + fmt.Fprintln(errW, "[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --ttl 0)") + } + err = RunCreateToken(out, client, token, tokenDuration, usages, description) kubeadmutil.CheckErr(err) }, diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/token/token.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/token/token.go index c0d139a2288c..d6d6edd83bab 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/token/token.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/token/token.go @@ -58,7 +58,7 @@ func RetrieveValidatedClusterInfo(discoveryToken string, tokenAPIServers []strin fmt.Printf("[discovery] Created cluster-info discovery client, requesting info from %q\n", bootstrapConfig.Clusters[clusterName].Server) var clusterinfo *v1.ConfigMap - wait.PollInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { + wait.PollImmediateInfinite(constants.DiscoveryRetryInterval, func() (bool, error) { var err error clusterinfo, err = client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests.go index 55fe560c4616..5f7c6760943a 100644 
--- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests.go @@ -127,13 +127,6 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error { LivenessProbe: componentProbe(2379, "/health", api.URISchemeHTTP), }, certsVolume(cfg), etcdVolume(cfg), k8sVolume()) - etcdPod.Spec.SecurityContext = &api.PodSecurityContext{ - SELinuxOptions: &api.SELinuxOptions{ - // Unconfine the etcd container so it can write to the data dir with SELinux enforcing: - Type: "spc_t", - }, - } - staticPodSpecs[etcd] = etcdPod } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/addons.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/addons.go index 3c3ab5f113cc..1cc6d1398049 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/addons.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/addons.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/images" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) // CreateEssentialAddons creates the kube-proxy and kube-dns addons @@ -110,7 +111,14 @@ func CreateKubeProxyAddon(configMapBytes, daemonSetbytes []byte, client *clients if err := kuberuntime.DecodeInto(api.Codecs.UniversalDecoder(), daemonSetbytes, kubeproxyDaemonSet); err != nil { return fmt.Errorf("unable to decode kube-proxy daemonset %v", err) } - kubeproxyDaemonSet.Spec.Template.Spec.Tolerations = []v1.Toleration{kubeadmconstants.MasterToleration} + kubeproxyDaemonSet.Spec.Template.Spec.Tolerations = []v1.Toleration{ + kubeadmconstants.MasterToleration, + { + Key: algorithm.TaintExternalCloudProvider, + Value: "true", + Effect: "NoSchedule", + }, + } if _, err := client.ExtensionsV1beta1().DaemonSets(metav1.NamespaceSystem).Create(kubeproxyDaemonSet); err != nil { if !apierrors.IsAlreadyExists(err) { diff --git 
a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/clusterroles.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/clusterroles.go index c6f4f1b6f965..47fec4c6cc02 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/clusterroles.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/clusterroles.go @@ -105,7 +105,7 @@ func createRoles(clientset *clientset.Clientset) error { Namespace: metav1.NamespacePublic, }, Rules: []rbac.PolicyRule{ - rbac.NewRule("get").Groups("").Resources("configmaps").RuleOrDie(), + rbac.NewRule("get").Groups("").Resources("configmaps").Names("cluster-info").RuleOrDie(), }, }, } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go index 0b0bd0b94ea2..b5d942e1455a 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go @@ -30,20 +30,19 @@ import ( "k8s.io/client-go/pkg/api/v1" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/util/node" ) const apiCallRetryInterval = 500 * time.Millisecond // TODO: Can we think of any unit tests here? Or should this code just be covered through integration/e2e tests? 
-func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error { +func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, nodeName string) error { var n *v1.Node // Wait for current node registration wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) { var err error - if n, err = client.Nodes().Get(node.GetHostname(""), metav1.GetOptions{}); err != nil { + if n, err = client.Nodes().Get(nodeName, metav1.GetOptions{}); err != nil { return false, nil } // The node may appear to have no labels at first, @@ -75,7 +74,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error if apierrs.IsConflict(err) { fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)") time.Sleep(apiCallRetryInterval) - attemptToUpdateMasterRoleLabelsAndTaints(client) + attemptToUpdateMasterRoleLabelsAndTaints(client, nodeName) } else { return err } @@ -95,9 +94,9 @@ func addTaintIfNotExists(n *v1.Node, t v1.Taint) { } // UpdateMasterRoleLabelsAndTaints taints the master and sets the master label -func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error { +func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, nodeName string) error { // TODO: Use iterate instead of recursion - err := attemptToUpdateMasterRoleLabelsAndTaints(client) + err := attemptToUpdateMasterRoleLabelsAndTaints(client, nodeName) if err != nil { return fmt.Errorf("failed to update master node - [%v]", err) } diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go index a19f7e60452b..f60f15c48a00 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go @@ -21,7 +21,6 @@ import ( "crypto/x509" "fmt" "net" - "os" setutil "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/apimachinery/pkg/util/validation" @@ -43,10 +42,6 @@ import ( // It generates a self-signed CA certificate and a server certificate (signed by the CA) func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error { pkiDir := cfg.CertificatesDir - hostname, err := os.Hostname() - if err != nil { - return fmt.Errorf("couldn't get the hostname: %v", err) - } _, svcSubnet, err := net.ParseCIDR(cfg.Networking.ServiceSubnet) if err != nil { @@ -54,7 +49,7 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error { } // Build the list of SANs - altNames := getAltNames(cfg.APIServerCertSANs, hostname, cfg.Networking.DNSDomain, svcSubnet) + altNames := getAltNames(cfg.APIServerCertSANs, cfg.NodeName, cfg.Networking.DNSDomain, svcSubnet) // Append the address the API Server is advertising altNames.IPs = append(altNames.IPs, net.ParseIP(cfg.API.AdvertiseAddress)) diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index d842c3eb0b8d..e3df1d430834 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -29,7 +29,6 @@ import ( kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" - "k8s.io/kubernetes/pkg/util/node" ) // BuildConfigProperties holds some simple information about how this phase should build the KubeConfig object @@ -53,12 +52,7 @@ type BuildConfigProperties struct { // /etc/kubernetes/{admin,kubelet}.conf exist but not certs => certs will be generated and conflict with the kubeconfig files => error // CreateInitKubeConfigFiles is called from the main init and does the work for the default phase behaviour -func CreateInitKubeConfigFiles(masterEndpoint, pkiDir, outDir string) error { - - nodeName := 
node.GetHostname("") - if len(nodeName) == 0 { - return fmt.Errorf("unable to get hostname for master node") - } +func CreateInitKubeConfigFiles(masterEndpoint, pkiDir, outDir, nodeName string) error { // Create a lightweight specification for what the files should look like filesToCreateFromSpec := map[string]BuildConfigProperties{ diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/checks.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/checks.go index 3ebbd6f38ee1..6edf5bd2134f 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/checks.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/checks.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/pkg/api/validation" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" "k8s.io/kubernetes/pkg/util/initsystem" - "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/test/e2e_node/system" ) @@ -267,21 +266,22 @@ func (ipc InPathCheck) Check() (warnings, errors []error) { // HostnameCheck checks if hostname match dns sub domain regex. // If hostname doesn't match this regex, kubelet will not launch static pods like kube-apiserver/kube-controller-manager and so on. 
-type HostnameCheck struct{} +type HostnameCheck struct { + nodeName string +} func (hc HostnameCheck) Check() (warnings, errors []error) { errors = []error{} warnings = []error{} - hostname := node.GetHostname("") - for _, msg := range validation.ValidateNodeName(hostname, false) { - errors = append(errors, fmt.Errorf("hostname \"%s\" %s", hostname, msg)) + for _, msg := range validation.ValidateNodeName(hc.nodeName, false) { + errors = append(errors, fmt.Errorf("hostname \"%s\" %s", hc.nodeName, msg)) } - addr, err := net.LookupHost(hostname) + addr, err := net.LookupHost(hc.nodeName) if addr == nil { - warnings = append(warnings, fmt.Errorf("hostname \"%s\" could not be reached", hostname)) + warnings = append(warnings, fmt.Errorf("hostname \"%s\" could not be reached", hc.nodeName)) } if err != nil { - warnings = append(warnings, fmt.Errorf("hostname \"%s\" %s", hostname, err)) + warnings = append(warnings, fmt.Errorf("hostname \"%s\" %s", hc.nodeName, err)) } return warnings, errors } @@ -488,7 +488,7 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error { checks := []Checker{ SystemVerificationCheck{}, IsRootCheck{}, - HostnameCheck{}, + HostnameCheck{nodeName: cfg.NodeName}, ServiceCheck{Service: "kubelet", CheckIfActive: false}, ServiceCheck{Service: "docker", CheckIfActive: true}, FirewalldCheck{ports: []int{int(cfg.API.BindPort), 10250}}, @@ -541,7 +541,7 @@ func RunJoinNodeChecks(cfg *kubeadmapi.NodeConfiguration) error { checks := []Checker{ SystemVerificationCheck{}, IsRootCheck{}, - HostnameCheck{}, + HostnameCheck{nodeName: cfg.NodeName}, ServiceCheck{Service: "kubelet", CheckIfActive: false}, ServiceCheck{Service: "docker", CheckIfActive: true}, PortOpenCheck{port: 10250}, diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index 223ef46bed9a..8add1565d5d3 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ 
b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -943,7 +943,12 @@ func parseResourceList(m componentconfig.ConfigurationMap) (v1.ResourceList, err if q.Sign() == -1 { return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v) } - rl[v1.ResourceName(k)] = q + // storage specified in configuration map is mapped to ResourceStorageScratch API + if v1.ResourceName(k) == v1.ResourceStorage { + rl[v1.ResourceStorageScratch] = q + } else { + rl[v1.ResourceName(k)] = q + } default: return nil, fmt.Errorf("cannot reserve %q resource", k) } diff --git a/vendor/k8s.io/kubernetes/examples/README.md b/vendor/k8s.io/kubernetes/examples/README.md index 7bfd41116f21..a37c8fd8663a 100644 --- a/vendor/k8s.io/kubernetes/examples/README.md +++ b/vendor/k8s.io/kubernetes/examples/README.md @@ -14,10 +14,10 @@ and best practices, and to refresh command syntax, output, changed prerequisites, as needed. |Name | Description | Notable Features Used | Complexity Level| -------------- | ------------- | ------------ | ------------ | +------------- | ------------- | ------------ | ------------ | |[Guestbook](guestbook/) | PHP app with Redis | Replication Controller, Service | Beginner | |[WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner| -|[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate +|[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate * Note: Please add examples to the list above that are maintained. diff --git a/vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh b/vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh index 8b3031fb2888..2a8b444fc859 100755 --- a/vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh +++ b/vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh @@ -31,7 +31,7 @@ function kill() { # Create database on second node (idempotently for convenience). 
cat < 6379/TCP 19m Also list all your Deployments: ```console -$ kubectl get deployments +$ kubectl get deployments NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE frontend 3 3 3 3 2m redis-master 1 1 1 1 39m diff --git a/vendor/k8s.io/kubernetes/examples/javaee/mysql-pod.yaml b/vendor/k8s.io/kubernetes/examples/javaee/mysql-pod.yaml index b8884f386b76..b7590249f0fb 100644 --- a/vendor/k8s.io/kubernetes/examples/javaee/mysql-pod.yaml +++ b/vendor/k8s.io/kubernetes/examples/javaee/mysql-pod.yaml @@ -6,23 +6,23 @@ metadata: name: mysql-pod context: docker-k8s-lab spec: - containers: - - + containers: + - name: mysql image: mysql:latest - env: - - + env: + - name: "MYSQL_USER" value: "mysql" - - + - name: "MYSQL_PASSWORD" value: "mysql" - - + - name: "MYSQL_DATABASE" value: "sample" - - + - name: "MYSQL_ROOT_PASSWORD" value: "supersecret" - ports: - - + ports: + - containerPort: 3306 diff --git a/vendor/k8s.io/kubernetes/examples/javaee/mysql-service.yaml b/vendor/k8s.io/kubernetes/examples/javaee/mysql-service.yaml index 0cbb329a82fd..cf85885fb8e8 100644 --- a/vendor/k8s.io/kubernetes/examples/javaee/mysql-service.yaml +++ b/vendor/k8s.io/kubernetes/examples/javaee/mysql-service.yaml @@ -1,15 +1,15 @@ apiVersion: v1 kind: Service -metadata: +metadata: name: mysql-service - labels: + labels: name: mysql-pod context: docker-k8s-lab -spec: +spec: ports: # the port that this service should serve on - port: 3306 # label keys and values that must match in order to receive traffic for this service - selector: + selector: name: mysql-pod context: docker-k8s-lab diff --git a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/README.md b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/README.md index 8e50d66d2fbd..8832d2afb685 100644 --- a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/README.md +++ b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/README.md @@ -124,7 +124,7 @@ spec: name: app-volume ports: - containerPort: 8080 - hostPort: 8001 
+ hostPort: 8001 volumes: - name: app-volume emptyDir: {} diff --git a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb-2.yaml b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb-2.yaml index b34d5ab6e26e..271e757b9a84 100644 --- a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb-2.yaml +++ b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb-2.yaml @@ -24,8 +24,7 @@ spec: name: app-volume ports: - containerPort: 8080 - hostPort: 8001 + hostPort: 8001 volumes: - name: app-volume emptyDir: {} - diff --git a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb.yaml b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb.yaml index d77f6a727e62..8d8518502d62 100644 --- a/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb.yaml +++ b/vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb.yaml @@ -21,4 +21,3 @@ spec: volumes: - name: app-volume emptyDir: {} - diff --git a/vendor/k8s.io/kubernetes/examples/oms/README.md b/vendor/k8s.io/kubernetes/examples/oms/README.md index 477b53c88c69..94e7f3bfa33a 100644 --- a/vendor/k8s.io/kubernetes/examples/oms/README.md +++ b/vendor/k8s.io/kubernetes/examples/oms/README.md @@ -42,7 +42,7 @@ The Workspace ID and Primary Key can be found inside the OMS Portal under Settin Run the following command to deploy the OMS agent to your Kubernetes nodes: ``` -kubectl -f omsagent-daemonset.yaml +kubectl -f omsagent-daemonset.yaml ``` ## Step 4 diff --git a/vendor/k8s.io/kubernetes/examples/openshift-origin/README.md b/vendor/k8s.io/kubernetes/examples/openshift-origin/README.md index 34b243518054..3b5b2027a181 100644 --- a/vendor/k8s.io/kubernetes/examples/openshift-origin/README.md +++ b/vendor/k8s.io/kubernetes/examples/openshift-origin/README.md @@ -94,14 +94,14 @@ You can automate the process with the following script, as it might take more th ```shell $ while [ ${#PUBLIC_OPENSHIFT_IP} -lt 1 ]; do - echo -n . 
- sleep 1 - { - export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}") + echo -n . + sleep 1 + { + export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}") } 2> ${OPENSHIFT_EXAMPLE}/openshift-startup.log if [[ ! ${PUBLIC_OPENSHIFT_IP} =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then export PUBLIC_OPENSHIFT_IP="" - fi + fi done $ echo $ echo "Public OpenShift IP set to: ${PUBLIC_OPENSHIFT_IP}" diff --git a/vendor/k8s.io/kubernetes/examples/openshift-origin/create.sh b/vendor/k8s.io/kubernetes/examples/openshift-origin/create.sh index 717df61b7f25..8f03989a7f22 100755 --- a/vendor/k8s.io/kubernetes/examples/openshift-origin/create.sh +++ b/vendor/k8s.io/kubernetes/examples/openshift-origin/create.sh @@ -82,7 +82,7 @@ export PUBLIC_OPENSHIFT_IP="" echo "===> Waiting for public IP to be set for the OpenShift Service." echo "Mistakes in service setup can cause this to loop infinitely if an" echo "external IP is never set. Ensure that the OpenShift service" -echo "is set to use an external load balancer. This process may take" +echo "is set to use an external load balancer. This process may take" echo "a few minutes. Errors can be found in the log file found at:" echo ${OPENSHIFT_EXAMPLE}/openshift-startup.log echo "" > ${OPENSHIFT_EXAMPLE}/openshift-startup.log diff --git a/vendor/k8s.io/kubernetes/examples/openshift-origin/etcd-controller.yaml b/vendor/k8s.io/kubernetes/examples/openshift-origin/etcd-controller.yaml index 419c57dbdbbe..fd4c55a68895 100644 --- a/vendor/k8s.io/kubernetes/examples/openshift-origin/etcd-controller.yaml +++ b/vendor/k8s.io/kubernetes/examples/openshift-origin/etcd-controller.yaml @@ -34,7 +34,7 @@ spec: # ETCD_DISCOVERY_TOKEN is a unique token used by the discovery service. 
Conforms to etcd-cluster-[a-z0-9]{5} - name: ETCD_DISCOVERY_TOKEN value: INSERT_ETCD_DISCOVERY_TOKEN - # ETCD_DISCOVERY_URL connects etcd instances together by storing a list of peer addresses, + # ETCD_DISCOVERY_URL connects etcd instances together by storing a list of peer addresses, # metadata and the initial size of the cluster under a unique address - name: ETCD_DISCOVERY_URL value: "http://etcd-discovery:2379" diff --git a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md index e664ae067d5d..5c5ed6a62190 100644 --- a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md +++ b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/README.md @@ -488,7 +488,7 @@ Source: Type: RBD (a Rados Block Device mount on the host that shares a pod's lifetime) CephMonitors: [127.0.0.1:6789] RBDImage: kubernetes-dynamic-pvc-1cfb1862-664b-11e6-9a5d-90b11c09520d - FSType: + FSType: RBDPool: kube RadosUser: kube Keyring: /etc/ceph/keyring diff --git a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/quobyte/example-pod.yaml b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/quobyte/example-pod.yaml index eb814f552a82..22ce31557102 100644 --- a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/quobyte/example-pod.yaml +++ b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/quobyte/example-pod.yaml @@ -13,7 +13,7 @@ spec: spec: containers: - name: server - image: nginx + image: nginx volumeMounts: - mountPath: /var/lib/www/html name: quobytepvc diff --git a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/ceph-secret-user.yaml b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/ceph-secret-user.yaml index e538dcafb5ad..f8d7c7b340e1 100644 --- a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/ceph-secret-user.yaml +++ 
b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/ceph-secret-user.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Secret metadata: name: ceph-secret-user -type: "kubernetes.io/rbd" +type: "kubernetes.io/rbd" data: #Please note this value is base64 encoded. key: QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ== diff --git a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml index e2d7d67d98cc..01a3751c79ea 100644 --- a/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml +++ b/vendor/k8s.io/kubernetes/examples/persistent-volume-provisioning/rbd/rbd-storage-class.yaml @@ -11,4 +11,3 @@ parameters: pool: kube userId: kube userSecretName: ceph-secret-user - diff --git a/vendor/k8s.io/kubernetes/examples/phabricator/php-phabricator/run.sh b/vendor/k8s.io/kubernetes/examples/phabricator/php-phabricator/run.sh index 1f2b8387f468..767f072b13c8 100755 --- a/vendor/k8s.io/kubernetes/examples/phabricator/php-phabricator/run.sh +++ b/vendor/k8s.io/kubernetes/examples/phabricator/php-phabricator/run.sh @@ -25,4 +25,3 @@ echo "Running storage upgrade" source /etc/apache2/envvars echo "Starting Apache2" apache2 -D FOREGROUND - diff --git a/vendor/k8s.io/kubernetes/examples/phabricator/setup.sh b/vendor/k8s.io/kubernetes/examples/phabricator/setup.sh index 588b1f5f93f6..f6a6177e4e3a 100755 --- a/vendor/k8s.io/kubernetes/examples/phabricator/setup.sh +++ b/vendor/k8s.io/kubernetes/examples/phabricator/setup.sh @@ -17,4 +17,3 @@ echo "Create Phabricator replication controller" && kubectl create -f phabricator-controller.json echo "Create Phabricator service" && kubectl create -f phabricator-service.json echo "Create firewall rule" && gcloud compute firewall-rules create phabricator-node-80 --allow=tcp:80 --target-tags kubernetes-node - diff --git 
a/vendor/k8s.io/kubernetes/examples/phabricator/teardown.sh b/vendor/k8s.io/kubernetes/examples/phabricator/teardown.sh index 266313912b78..9021066ccef4 100755 --- a/vendor/k8s.io/kubernetes/examples/phabricator/teardown.sh +++ b/vendor/k8s.io/kubernetes/examples/phabricator/teardown.sh @@ -17,4 +17,3 @@ echo "Deleting Phabricator service" && kubectl delete -f phabricator-service.json echo "Deleting Phabricator replication controller" && kubectl delete rc phabricator-controller echo "Delete firewall rule" && gcloud compute firewall-rules delete -q phabricator-node-80 - diff --git a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/README.md b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/README.md index e606d23dec72..39392c75eaf6 100644 --- a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/README.md +++ b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/README.md @@ -75,7 +75,7 @@ spec: To create these policies run ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/policies.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/policies.yaml podsecuritypolicy "privileged" created podsecuritypolicy "restricted" created ``` @@ -107,11 +107,11 @@ role which is already provided by the cluster. 
To create these roles and bindings run ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/roles.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/roles.yaml clusterrole "restricted-psp-user" created clusterrole "privileged-psp-user" created -$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/bindings.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/system:masters create -f examples/podsecuritypolicy/rbac/bindings.yaml clusterrolebinding "privileged-psp-users" created clusterrolebinding "restricted-psp-users" created clusterrolebinding "edit" created @@ -124,7 +124,7 @@ clusterrolebinding "edit" created Create the pod ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod.yaml pod "nginx" created ``` @@ -147,14 +147,14 @@ pod "nginx" deleted Create the privileged pod ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/restricted-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml Error from server (Forbidden): error when creating "examples/podsecuritypolicy/rbac/pod_priv.yaml": pods "nginx" is forbidden: unable to validate against any pod security policy: [spec.containers[0].securityContext.privileged: Invalid value: true: Privileged containers are not allowed] ``` ### Privileged user can create non-privileged pods ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users create -f examples/podsecuritypolicy/rbac/pod.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users 
create -f examples/podsecuritypolicy/rbac/pod.yaml pod "nginx" created ``` @@ -179,7 +179,7 @@ pod "nginx" deleted Create the privileged pod ``` -$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml +$ kubectl --server=https://127.0.0.1:6443 --token=foo/privileged-psp-users create -f examples/podsecuritypolicy/rbac/pod_priv.yaml pod "nginx" created ``` diff --git a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/policies.yaml b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/policies.yaml index 6ddd9422fbe0..b7f80d0200fa 100644 --- a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/policies.yaml +++ b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/policies.yaml @@ -36,4 +36,3 @@ spec: - 'configMap' - 'persistentVolumeClaim' - 'projected' - diff --git a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/roles.yaml b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/roles.yaml index 43aecf2a09a4..a5fd8f41192c 100644 --- a/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/roles.yaml +++ b/vendor/k8s.io/kubernetes/examples/podsecuritypolicy/rbac/roles.yaml @@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: restricted-psp-user -rules: +rules: - apiGroups: - extensions resources: @@ -20,7 +20,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: privileged-psp-user -rules: +rules: - apiGroups: - extensions resources: @@ -29,5 +29,3 @@ rules: - privileged verbs: - use - - diff --git a/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-rc.yaml b/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-rc.yaml index f48510e1ce09..fcd3910d5025 100644 --- a/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-rc.yaml +++ b/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-rc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ReplicationController metadata: - name: 
selenium-hub + name: selenium-hub labels: app: selenium-hub spec: @@ -17,7 +17,7 @@ spec: - name: selenium-hub image: selenium/hub:2.53.0 ports: - - containerPort: 4444 + - containerPort: 4444 resources: limits: memory: "1000Mi" diff --git a/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-svc.yaml b/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-svc.yaml index 0b252ede3758..2de79e900713 100644 --- a/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-svc.yaml +++ b/vendor/k8s.io/kubernetes/examples/selenium/selenium-hub-svc.yaml @@ -6,8 +6,8 @@ metadata: app: selenium-hub spec: ports: - - port: 4444 - targetPort: 4444 + - port: 4444 + targetPort: 4444 name: port0 selector: app: selenium-hub diff --git a/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-chrome-rc.yaml b/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-chrome-rc.yaml index 3c49c1b43d41..ea1cc6671651 100644 --- a/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-chrome-rc.yaml +++ b/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-chrome-rc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ReplicationController metadata: - name: selenium-node-chrome + name: selenium-node-chrome labels: app: selenium-node-chrome spec: @@ -17,11 +17,11 @@ spec: - name: selenium-node-chrome image: selenium/node-chrome-debug:2.53.0 ports: - - containerPort: 5900 + - containerPort: 5900 env: - - name: HUB_PORT_4444_TCP_ADDR + - name: HUB_PORT_4444_TCP_ADDR value: "selenium-hub" - - name: HUB_PORT_4444_TCP_PORT + - name: HUB_PORT_4444_TCP_PORT value: "4444" resources: limits: diff --git a/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-firefox-rc.yaml b/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-firefox-rc.yaml index d6f6657858ea..4408608198f4 100644 --- a/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-firefox-rc.yaml +++ b/vendor/k8s.io/kubernetes/examples/selenium/selenium-node-firefox-rc.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: 
ReplicationController metadata: - name: selenium-node-firefox + name: selenium-node-firefox labels: app: selenium-node-firefox spec: @@ -17,11 +17,11 @@ spec: - name: selenium-node-firefox image: selenium/node-firefox-debug:2.53.0 ports: - - containerPort: 5900 + - containerPort: 5900 env: - - name: HUB_PORT_4444_TCP_ADDR + - name: HUB_PORT_4444_TCP_ADDR value: "selenium-hub" - - name: HUB_PORT_4444_TCP_PORT + - name: HUB_PORT_4444_TCP_PORT value: "4444" resources: limits: diff --git a/vendor/k8s.io/kubernetes/examples/selenium/selenium-test.py b/vendor/k8s.io/kubernetes/examples/selenium/selenium-test.py index 80d598a3b18a..0fe2fec3a11f 100644 --- a/vendor/k8s.io/kubernetes/examples/selenium/selenium-test.py +++ b/vendor/k8s.io/kubernetes/examples/selenium/selenium-test.py @@ -30,4 +30,3 @@ def check_browser(browser): check_browser("FIREFOX") check_browser("CHROME") - diff --git a/vendor/k8s.io/kubernetes/examples/spark/spark-worker-controller.yaml b/vendor/k8s.io/kubernetes/examples/spark/spark-worker-controller.yaml index 9c748b3e0489..05d4ebddf806 100644 --- a/vendor/k8s.io/kubernetes/examples/spark/spark-worker-controller.yaml +++ b/vendor/k8s.io/kubernetes/examples/spark/spark-worker-controller.yaml @@ -20,4 +20,3 @@ spec: resources: requests: cpu: 100m - diff --git a/vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md b/vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md index 95f63f42ea9b..36eb4f99f648 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md +++ b/vendor/k8s.io/kubernetes/examples/storage/cassandra/README.md @@ -177,7 +177,7 @@ The StatefulSet manifest that is included below, creates a Cassandra ring that c of three pods. This example includes using a GCE Storage Class, please update appropriately depending -on the cloud you are working with. +on the cloud you are working with. 
diff --git a/vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml b/vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml index 7df40351e23b..b55fd42ebeda 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/cassandra/image/files/cassandra.yaml @@ -306,7 +306,7 @@ counter_cache_save_period: 7200 saved_caches_directory: /cassandra_data/saved_caches # commitlog_sync may be either "periodic" or "batch." -# +# # When in batch mode, Cassandra won't ack writes until the commit log # has been fsynced to disk. It will wait # commitlog_sync_batch_window_in_ms milliseconds between fsyncs. @@ -980,11 +980,10 @@ transparent_data_encryption_options: key_alias: testing:1 # CBC IV length for AES needs to be 16 bytes (which is also the default size) # iv_length: 16 - key_provider: + key_provider: - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: + parameters: - keystore: conf/.keystore keystore_password: cassandra store_type: JCEKS key_password: cassandra - diff --git a/vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml b/vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml index 791d310364f1..6616d7673c41 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/cassandra/java/src/test/resources/cassandra.yaml @@ -35,7 +35,7 @@ data_file_directories: - target/cassandra/data disk_access_mode: mmap seed_provider: - - class_name: io.k8s.cassandra.KubernetesSeedProvider + - class_name: io.k8s.cassandra.KubernetesSeedProvider parameters: - seeds: "8.4.4.4,8.8.8.8" endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch diff --git a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md 
index 8940204a14b8..f875c22ebde1 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md +++ b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/README.md @@ -43,7 +43,7 @@ metadata: labels: name: hazelcast name: hazelcast -spec: +spec: ports: - port: 5701 selector: @@ -74,24 +74,24 @@ Deployments will "adopt" existing pods that match their selector query, so let's ```yaml apiVersion: extensions/v1beta1 kind: Deployment -metadata: +metadata: name: hazelcast - labels: + labels: name: hazelcast -spec: - template: - metadata: - labels: +spec: + template: + metadata: + labels: name: hazelcast - spec: - containers: + spec: + containers: - name: hazelcast image: quay.io/pires/hazelcast-kubernetes:0.8.0 imagePullPolicy: Always env: - name: "DNS_DOMAIN" value: "cluster.local" - ports: + ports: - name: hazelcast containerPort: 5701 ``` @@ -184,7 +184,7 @@ kubectl logs -f hazelcast-4195412960-0tl3w 2017-03-15 09:42:47.253 INFO 7 --- [cached.thread-3] c.hazelcast.nio.tcp.InitConnectionTask : [172.17.0.6]:5701 [someGroup] [3.8] Connecting to /172.17.0.2:5701, timeout: 0, bind-any: true 2017-03-15 09:42:47.262 INFO 7 --- [cached.thread-3] c.h.nio.tcp.TcpIpConnectionManager : [172.17.0.6]:5701 [someGroup] [3.8] Established socket connection between /172.17.0.6:58073 and /172.17.0.2:5701 2017-03-15 09:42:54.260 INFO 7 --- [ration.thread-0] com.hazelcast.system : [172.17.0.6]:5701 [someGroup] [3.8] Cluster version set to 3.8 -2017-03-15 09:42:54.262 INFO 7 --- [ration.thread-0] c.h.internal.cluster.ClusterService : [172.17.0.6]:5701 [someGroup] [3.8] +2017-03-15 09:42:54.262 INFO 7 --- [ration.thread-0] c.h.internal.cluster.ClusterService : [172.17.0.6]:5701 [someGroup] [3.8] Members [2] { Member [172.17.0.2]:5701 - 170f6924-7888-442a-9875-ad4d25659a8a diff --git a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-deployment.yaml b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-deployment.yaml index bbf61b092c4a..e530134435b3 100644 
--- a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-deployment.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-deployment.yaml @@ -1,16 +1,16 @@ apiVersion: extensions/v1beta1 kind: Deployment -metadata: +metadata: name: hazelcast - labels: + labels: name: hazelcast -spec: - template: - metadata: - labels: +spec: + template: + metadata: + labels: name: hazelcast - spec: - containers: + spec: + containers: - name: hazelcast image: quay.io/pires/hazelcast-kubernetes:3.8_1 imagePullPolicy: Always @@ -21,6 +21,6 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - ports: + ports: - name: hazelcast containerPort: 5701 diff --git a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml index 0c9dc55da517..1b18206e8746 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/hazelcast/hazelcast-service.yaml @@ -4,7 +4,7 @@ metadata: labels: name: hazelcast name: hazelcast -spec: +spec: ports: - port: 5701 selector: diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md index 02846ea477f3..7362c8cd971b 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/README.md @@ -40,18 +40,18 @@ When complete, you should be able connect with a MySQL client to the IP address Shown below are examples of Using ```kubectl``` from within the ```./examples/storage/mysql-galera``` directory, the status of the lauched replication controllers and services can be confirmed ``` -$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-cluster-service.yaml services/pxc-cluster -$ kubectl create -f 
examples/storage/mysql-galera/pxc-node1.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node1.yaml services/pxc-node1 replicationcontrollers/pxc-node1 -$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node2.yaml services/pxc-node2 replicationcontrollers/pxc-node2 -$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml +$ kubectl create -f examples/storage/mysql-galera/pxc-node3.yaml services/pxc-node3 replicationcontrollers/pxc-node3 @@ -100,7 +100,7 @@ pxc-node3-0b5mc $ kubectl exec pxc-node3-0b5mc -i -t -- mysql -u root -p -h pxc-cluster -Enter password: +Enter password: Welcome to the MySQL monitor. Commands end with ; or \g. Your MySQL connection id is 5 Server version: 5.6.24-72.2-56-log Percona XtraDB Cluster (GPL), Release rel72.2, Revision 43abf03, WSREP version 25.11, wsrep_25.11 diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile index 53a068c8c930..f120a008b36d 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/Dockerfile @@ -14,7 +14,7 @@ FROM ubuntu:trusty -# add our user and group first to make sure their IDs get assigned +# add our user and group first to make sure their IDs get assigned # consistently, regardless of whatever dependencies get added RUN groupadd -r mysql && useradd -r -g mysql mysql @@ -22,7 +22,7 @@ ENV PERCONA_XTRADB_VERSION 5.6 ENV MYSQL_VERSION 5.6 ENV TERM linux -RUN apt-get update +RUN apt-get update RUN DEBIAN_FRONTEND=noninteractive apt-get install -y perl --no-install-recommends && rm -rf /var/lib/apt/lists/* RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 8507EFA5 @@ -31,7 +31,7 @@ RUN echo "deb http://repo.percona.com/apt trusty main" > /etc/apt/sources.list.d RUN echo "deb-src http://repo.percona.com/apt trusty main" >> 
/etc/apt/sources.list.d/percona.list # the "/var/lib/mysql" stuff here is because the mysql-server -# postinst doesn't have an explicit way to disable the +# postinst doesn't have an explicit way to disable the # mysql_install_db codepath besides having a database already # "configured" (ie, stuff in /var/lib/mysql/mysql) # also, we set debconf keys to make APT a little quieter @@ -42,7 +42,7 @@ RUN { \ && apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y percona-xtradb-cluster-client-"${MYSQL_VERSION}" \ percona-xtradb-cluster-common-"${MYSQL_VERSION}" percona-xtradb-cluster-server-"${MYSQL_VERSION}" \ && rm -rf /var/lib/apt/lists/* \ - && rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql + && rm -rf /var/lib/mysql && mkdir -p /var/lib/mysql && chown -R mysql:mysql /var/lib/mysql VOLUME /var/lib/mysql diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh index 50185562f7eb..cb041f7edcd7 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/image/docker-entrypoint.sh @@ -14,13 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# +# # This script does the following: -# +# # 1. Sets up database privileges by building an SQL script -# 2. MySQL is initially started with this script a first time +# 2. MySQL is initially started with this script a first time # 3. Modify my.cnf and cluster.cnf to reflect available nodes to join -# +# # if NUM_NODES not passed, default to 3 if [ -z "$NUM_NODES" ]; then @@ -31,15 +31,15 @@ if [ "${1:0:1}" = '-' ]; then set -- mysqld "$@" fi -# if the command passed is 'mysqld' via CMD, then begin processing. +# if the command passed is 'mysqld' via CMD, then begin processing. 
if [ "$1" = 'mysqld' ]; then # read DATADIR from the MySQL config DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')" - - # only check if system tables not created from mysql_install_db and permissions + + # only check if system tables not created from mysql_install_db and permissions # set with initial SQL script before proceeding to build SQL script if [ ! -d "$DATADIR/mysql" ]; then - # fail if user didn't supply a root password + # fail if user didn't supply a root password if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set' echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?' @@ -50,23 +50,23 @@ if [ "$1" = 'mysqld' ]; then echo 'Running mysql_install_db ...' mysql_install_db --datadir="$DATADIR" echo 'Finished mysql_install_db' - + # this script will be run once when MySQL first starts to set up - # prior to creating system tables and will ensure proper user permissions + # prior to creating system tables and will ensure proper user permissions tempSqlFile='/tmp/mysql-first-time.sql' cat > "$tempSqlFile" <<-EOSQL DELETE FROM mysql.user ; CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ; GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ; EOSQL - + if [ "$MYSQL_DATABASE" ]; then echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" >> "$tempSqlFile" fi - + if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$tempSqlFile" - + if [ "$MYSQL_DATABASE" ]; then echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" >> "$tempSqlFile" fi @@ -87,11 +87,11 @@ EOSQL fi echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile" - + # Add the SQL file to mysqld's command line args set -- "$@" --init-file="$tempSqlFile" fi - + chown -R mysql:mysql "$DATADIR" fi @@ -114,11 +114,11 @@ if [ -n "$GALERA_CLUSTER" ]; then if [ -n "$WSREP_NODE_ADDRESS" 
]; then sed -i -e "s|^wsrep_node_address=.*$|wsrep_node_address=${WSREP_NODE_ADDRESS}|" /etc/mysql/conf.d/cluster.cnf fi - + # if the string is not defined or it only is 'gcomm://', this means bootstrap if [ -z "$WSREP_CLUSTER_ADDRESS" -o "$WSREP_CLUSTER_ADDRESS" == "gcomm://" ]; then # if empty, set to 'gcomm://' - # NOTE: this list does not imply membership. + # NOTE: this list does not imply membership. # It only means "obtain SST and join from one of these..." if [ -z "$WSREP_CLUSTER_ADDRESS" ]; then WSREP_CLUSTER_ADDRESS="gcomm://" @@ -127,7 +127,7 @@ if [ -n "$GALERA_CLUSTER" ]; then # loop through number of nodes for NUM in `seq 1 $NUM_NODES`; do NODE_SERVICE_HOST="PXC_NODE${NUM}_SERVICE_HOST" - + # if set if [ -n "${!NODE_SERVICE_HOST}" ]; then # if not its own IP, then add it @@ -149,7 +149,7 @@ if [ -n "$GALERA_CLUSTER" ]; then done fi - # WSREP_CLUSTER_ADDRESS is now complete and will be interpolated into the + # WSREP_CLUSTER_ADDRESS is now complete and will be interpolated into the # cluster address string (wsrep_cluster_address) in the cluster # configuration file, cluster.cnf if [ -n "$WSREP_CLUSTER_ADDRESS" -a "$WSREP_CLUSTER_ADDRESS" != "gcomm://" ]; then @@ -160,5 +160,5 @@ fi # random server ID needed sed -i -e "s/^server\-id=.*$/server-id=${RANDOM}/" /etc/mysql/my.cnf -# finally, start mysql +# finally, start mysql exec "$@" diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml index fa1163bdcbf3..b60667a237c2 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node1.yaml @@ -11,27 +11,27 @@ spec: - port: 4444 name: state-snapshot-transfer - port: 4567 - name: replication-traffic + name: replication-traffic - port: 4568 - name: incremental-state-transfer + name: incremental-state-transfer selector: - node: pxc-node1 + node: pxc-node1 --- apiVersion: v1 
kind: ReplicationController metadata: - name: pxc-node1 + name: pxc-node1 spec: replicas: 1 template: metadata: labels: - node: pxc-node1 + node: pxc-node1 unit: pxc-cluster spec: containers: - resources: - limits: + limits: cpu: 0.3 image: capttofu/percona_xtradb_cluster_5_6:beta name: pxc-node1 @@ -54,4 +54,4 @@ spec: - name: MYSQL_PASSWORD value: mysql - name: MYSQL_ROOT_PASSWORD - value: c-krit + value: c-krit diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml index ead3675d746a..a3702ca17751 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node2.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: pxc-node2 - labels: + labels: node: pxc-node2 spec: ports: @@ -11,28 +11,28 @@ spec: - port: 4444 name: state-snapshot-transfer - port: 4567 - name: replication-traffic + name: replication-traffic - port: 4568 - name: incremental-state-transfer + name: incremental-state-transfer selector: - node: pxc-node2 + node: pxc-node2 --- apiVersion: v1 kind: ReplicationController metadata: - name: pxc-node2 + name: pxc-node2 spec: replicas: 1 template: metadata: labels: - node: pxc-node2 + node: pxc-node2 unit: pxc-cluster spec: containers: - resources: - limits: + limits: cpu: 0.3 image: capttofu/percona_xtradb_cluster_5_6:beta name: pxc-node2 @@ -55,4 +55,4 @@ spec: - name: MYSQL_PASSWORD value: mysql - name: MYSQL_ROOT_PASSWORD - value: c-krit + value: c-krit diff --git a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml index fbb368b27dd1..265fbf17d899 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/mysql-galera/pxc-node3.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: 
pxc-node3 - labels: + labels: node: pxc-node3 spec: ports: @@ -11,28 +11,28 @@ spec: - port: 4444 name: state-snapshot-transfer - port: 4567 - name: replication-traffic + name: replication-traffic - port: 4568 - name: incremental-state-transfer + name: incremental-state-transfer selector: - node: pxc-node3 + node: pxc-node3 --- apiVersion: v1 kind: ReplicationController metadata: - name: pxc-node3 + name: pxc-node3 spec: replicas: 1 template: metadata: labels: - node: pxc-node3 + node: pxc-node3 unit: pxc-cluster spec: containers: - resources: - limits: + limits: cpu: 0.3 image: capttofu/percona_xtradb_cluster_5_6:beta name: pxc-node3 @@ -55,4 +55,4 @@ spec: - name: MYSQL_PASSWORD value: mysql - name: MYSQL_ROOT_PASSWORD - value: c-krit + value: c-krit diff --git a/vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf b/vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf index afd0a45fd432..4b4d23fd19d8 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf +++ b/vendor/k8s.io/kubernetes/examples/storage/redis/image/redis-slave.conf @@ -203,7 +203,7 @@ dir "./" # network partition slaves automatically try to reconnect to masters # and resynchronize with them. # -slaveof %master-ip% %master-port% +slaveof %master-ip% %master-port% # If the master is password protected (using the "requirepass" configuration # directive below) it is possible to tell the slave to authenticate before diff --git a/vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh b/vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh index 6fed5af4a84b..799edc2abfcf 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/redis/image/run.sh @@ -59,7 +59,7 @@ function launchslave() { echo "Failed to find master." sleep 60 exit 1 - fi + fi redis-cli -h ${master} INFO if [[ "$?" 
== "0" ]]; then break diff --git a/vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml b/vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml index fcb5e67cd6fa..26fc26f7852c 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/redis/redis-controller.yaml @@ -25,4 +25,3 @@ spec: volumes: - name: data emptyDir: {} - diff --git a/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh b/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh index 90a44f2300e6..fe871a816918 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/gen-pod.sh @@ -32,7 +32,7 @@ if [[ ${NAME} == "admin" ]]; then fi NODE="" -# One needs to label a node with the same key/value pair, +# One needs to label a node with the same key/value pair, # i.e., 'kubectl label nodes name=${2}' if [[ ! -z "${2-}" ]]; then NODE="nodeSelector: { name: ${2} }" diff --git a/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh b/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh index 607eb59edc1a..4839a6d6dc96 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/rethinkdb/image/run.sh @@ -27,7 +27,7 @@ if [[ -n "${KUBERNETES_SERVICE_HOST}" ]]; then URL="https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}/api/v1/namespaces/${POD_NAMESPACE}/endpoints/rethinkdb-driver" echo "Endpont url: ${URL}" echo "Looking for IPs..." 
- token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) # try to pick up first different ip from endpoints IP=$(curl -s ${URL} --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt --header "Authorization: Bearer ${token}" \ | jq -s -r --arg h "${MYHOST}" '.[0].subsets | .[].addresses | [ .[].ip ] | map(select(. != $h)) | .[0]') || exit 1 diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh index 7166c7fbb9cc..89ec4bd35239 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/configure.sh @@ -70,4 +70,3 @@ esac echo "Saving config.sh..." echo "backup_flags=\"$backup_flags\"" > config.sh - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql b/vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql index 0a6ef36090be..fa434443019c 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/create_test_table.sql @@ -5,4 +5,3 @@ CREATE TABLE messages ( message VARCHAR(10000), PRIMARY KEY (page, time_created_ns) ) ENGINE=InnoDB - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh index 49b06c80073a..8baf69f947c2 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/env.sh @@ -60,4 +60,3 @@ if [ ! 
-f $config_file ]; then fi source $config_file - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml index dcd7980bd9f2..0a2cbb99555c 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-controller-template.yaml @@ -51,4 +51,3 @@ spec: -initial-advertise-peer-urls http://$ipaddr:7001 -listen-client-urls http://$ipaddr:4001 -listen-peer-urls http://$ipaddr:7001 - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh index 1f3ca258cb0e..a30325f61ec9 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-down.sh @@ -33,4 +33,3 @@ for cell in 'global' $cells; do echo "Deleting etcd service for $cell cell..." $KUBECTL delete service etcd-$cell done - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml index 817c3e13236d..966e67f04b5c 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-service-template.yaml @@ -13,4 +13,3 @@ spec: component: etcd cell: {{cell}} app: vitess - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh index b97e3690071e..566fa78afb3d 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/etcd-up.sh @@ -57,4 +57,3 @@ for cell in 'global' $cells; do echo "Creating etcd replicationcontroller for $cell cell..." 
cat etcd-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f - done - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml index 1c5ca5a18b87..022bfbaa21bd 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-controller.yaml @@ -20,4 +20,3 @@ spec: limits: memory: "128Mi" cpu: "100m" - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml index 5435f7fa8091..92360b4854e4 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/guestbook-service.yaml @@ -13,4 +13,3 @@ spec: component: guestbook app: vitess type: LoadBalancer - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh index 4add4ee7e15e..f86bc8f5f164 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vitess-up.sh @@ -162,4 +162,3 @@ echo "* Use the following line to make an alias to kvtctl:" echo "* alias kvtctl='\$GOPATH/bin/vtctlclient -server $vtctl_server'" echo "* See the vtctld UI at: http://${vtctld_ip}:30000" echo "****************************" - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml index 72fe245a2120..a86da9f4d6ba 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-controller-template.yaml @@ -52,4 +52,3 @@ spec: emptyDir: {} - name: certs hostPath: {path: /etc/ssl/certs} - diff --git 
a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml index 70d619a873dd..1130871bd366 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-service.yaml @@ -19,4 +19,3 @@ spec: component: vtctld app: vitess type: NodePort - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh index 257b2d773026..8f08494d329c 100755 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtctld-up.sh @@ -37,4 +37,3 @@ cat vtctld-controller-template.yaml | sed -e "$sed_script" | $KUBECTL create -f server=$(get_vtctld_addr) echo echo "vtctld address: http://$server" - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml index 03c9665b2df5..2df8f2903f71 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-controller-template.yaml @@ -42,4 +42,3 @@ spec: hostPath: {path: /dev/log} - name: vtdataroot emptyDir: {} - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml index 192968aa2d0e..37fe8f02bd71 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vtgate-service.yaml @@ -12,4 +12,3 @@ spec: component: vtgate app: vitess type: LoadBalancer - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh b/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh index 0683f1f7de74..146fe1191bad 100755 --- 
a/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-down.sh @@ -48,4 +48,3 @@ for shard in `seq 1 $num_shards`; do done let uid_base=uid_base+100 done - diff --git a/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml b/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml index d3d097e8fcfe..6b2f50007f64 100644 --- a/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml +++ b/vendor/k8s.io/kubernetes/examples/storage/vitess/vttablet-pod-template.yaml @@ -125,4 +125,3 @@ spec: emptyDir: {} - name: certs hostPath: {path: /etc/ssl/certs} - diff --git a/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-daemonset.yaml b/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-daemonset.yaml index e1fc1534a75a..bc063e9d71e5 100644 --- a/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-daemonset.yaml +++ b/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-daemonset.yaml @@ -1,7 +1,7 @@ #Use this sysdig.yaml when Daemon Sets are enabled on Kubernetes (minimum version 1.1.1). Otherwise use the RC method. 
apiVersion: extensions/v1beta1 -kind: DaemonSet +kind: DaemonSet metadata: name: sysdig-agent labels: @@ -42,10 +42,10 @@ spec: - name: ACCESS_KEY #REQUIRED - replace with your Sysdig Cloud access key value: 8312341g-5678-abcd-4a2b2c-33bcsd655 # - name: TAGS #OPTIONAL -# value: linux:ubuntu,dept:dev,local:nyc +# value: linux:ubuntu,dept:dev,local:nyc # - name: COLLECTOR #OPTIONAL - on-prem install only -# value: 192.168.183.200 -# - name: SECURE #OPTIONAL - on-prem install only +# value: 192.168.183.200 +# - name: SECURE #OPTIONAL - on-prem install only # value: false # - name: CHECK_CERTIFICATE #OPTIONAL - on-prem install only # value: false diff --git a/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-rc.yaml b/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-rc.yaml index d088cd5355b2..033b1a25d261 100644 --- a/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-rc.yaml +++ b/vendor/k8s.io/kubernetes/examples/sysdig-cloud/sysdig-rc.yaml @@ -1,7 +1,7 @@ #Use this sysdig.yaml when Daemon Sets are NOT enabled on Kubernetes (minimum version 1.1.1). If Daemon Sets are available, use the other example sysdig.yaml - that is the recommended method. 
apiVersion: v1 -kind: ReplicationController +kind: ReplicationController metadata: name: sysdig-agent labels: @@ -47,10 +47,10 @@ spec: # - name: K8S_API_URI #OPTIONAL - only necessary when connecting remotely to API server # value: "http[s]://[username:passwd@]host[:port]" # - name: TAGS #OPTIONAL -# value: linux:ubuntu,dept:dev,local:nyc +# value: linux:ubuntu,dept:dev,local:nyc # - name: COLLECTOR #OPTIONAL -# value: 192.168.183.200 -# - name: SECURE #OPTIONAL +# value: 192.168.183.200 +# - name: SECURE #OPTIONAL # value: false # - name: CHECK_CERTIFICATE #OPTIONAL # value: false diff --git a/vendor/k8s.io/kubernetes/examples/volumes/cephfs/README.md b/vendor/k8s.io/kubernetes/examples/volumes/cephfs/README.md index ce6263cdae99..4f97bb859e6e 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/cephfs/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/cephfs/README.md @@ -25,7 +25,7 @@ Here are the commands: # create a secret if you want to use Ceph secret instead of secret file # kubectl create -f examples/volumes/cephfs/secret/ceph-secret.yaml - + # kubectl create -f examples/volumes/cephfs/cephfs-with-secret.yaml # kubectl get pods ``` diff --git a/vendor/k8s.io/kubernetes/examples/volumes/cephfs/cephfs.yaml b/vendor/k8s.io/kubernetes/examples/volumes/cephfs/cephfs.yaml index e4eb395b6245..ddaab3b16ae3 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/cephfs/cephfs.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/cephfs/cephfs.yaml @@ -17,7 +17,7 @@ spec: - 10.16.154.82:6789 - 10.16.154.83:6789 # by default the path is /, but you can override and mount a specific path of the filesystem by using the path attribute - # path: /some/path/in/side/cephfs + # path: /some/path/in/side/cephfs user: admin secretFile: "/etc/ceph/admin.secret" readOnly: true diff --git a/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/README.md b/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/README.md index 0e1bcf9b39e7..3607e8780e42 100644 --- 
a/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/README.md @@ -34,9 +34,9 @@ If you ssh to that machine, you can run `docker ps` to see the actual pod. ```console # docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -090ac457ddc2 kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-rw.aae720ec_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_99eb5415 -5e2629cf3e7b kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-ro.857720dc_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_c0175742 -2948683253f7 gcr.io/google_containers/pause:0.8.0 "/pause" 12 minutes ago Up 12 minutes k8s_POD.7be6d81d_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_8d9dd7bf +090ac457ddc2 kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-rw.aae720ec_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_99eb5415 +5e2629cf3e7b kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-ro.857720dc_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_c0175742 +2948683253f7 gcr.io/google_containers/pause:0.8.0 "/pause" 12 minutes ago Up 12 minutes k8s_POD.7be6d81d_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_8d9dd7bf ``` ## Multipath diff --git a/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/fc.yaml b/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/fc.yaml index ac28bee4a3e0..6a4ec820ef32 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/fc.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/fibre_channel/fc.yaml @@ -4,7 +4,7 @@ metadata: name: fc spec: containers: - - image: kubernetes/pause + - image: kubernetes/pause name: fc volumeMounts: - name: fc-vol diff --git a/vendor/k8s.io/kubernetes/examples/volumes/flocker/README.md b/vendor/k8s.io/kubernetes/examples/volumes/flocker/README.md index 72ad47bc3bdb..53cf57327806 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/flocker/README.md +++ 
b/vendor/k8s.io/kubernetes/examples/volumes/flocker/README.md @@ -47,7 +47,7 @@ More details regarding cluster authentication can be found at the documentation: ```sh flocker-volumes create -m name=my-flocker-vol -s 10G -n -# -n or --node= Is the initial primary node for dataset (any unique +# -n or --node= Is the initial primary node for dataset (any unique # prefix of node uuid, see flocker-volumes list-nodes) ``` diff --git a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/README.md b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/README.md index 7f6090eff31d..f16233a8420d 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/README.md @@ -48,16 +48,16 @@ apiVersion: v1 kind: Secret metadata: name: chap-secret -type: "kubernetes.io/iscsi-chap" +type: "kubernetes.io/iscsi-chap" data: - discovery.sendtargets.auth.username: - discovery.sendtargets.auth.password: - discovery.sendtargets.auth.username_in: - discovery.sendtargets.auth.password_in: - node.session.auth.username: - node.session.auth.password: - node.session.auth.username_in: - node.session.auth.password_in: + discovery.sendtargets.auth.username: + discovery.sendtargets.auth.password: + discovery.sendtargets.auth.username_in: + discovery.sendtargets.auth.password_in: + node.session.auth.username: + node.session.auth.password: + node.session.auth.username_in: + node.session.auth.password_in: ``` These keys map to those used by Open-iSCSI initiator. 
Detailed documents on these keys can be found at [Open-iSCSI](https://github.com/open-iscsi/open-iscsi/blob/master/etc/iscsid.conf) diff --git a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/chap-secret.yaml b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/chap-secret.yaml index 5bc9cc8747e9..631d81743b05 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/chap-secret.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/chap-secret.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Secret metadata: name: chap-secret -type: "kubernetes.io/iscsi-chap" +type: "kubernetes.io/iscsi-chap" data: discovery.sendtargets.auth.username: dXNlcg== discovery.sendtargets.auth.password: ZGVtbw== diff --git a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/iscsi-chap.yaml b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/iscsi-chap.yaml index 1ddc2f02cac0..c7a66805734d 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/iscsi/iscsi-chap.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/iscsi/iscsi-chap.yaml @@ -20,5 +20,5 @@ spec: readOnly: true chapAuthDiscovery: true chapAuthSession: true - secretRef: + secretRef: name: chap-secret diff --git a/vendor/k8s.io/kubernetes/examples/volumes/nfs/nfs-data/run_nfs.sh b/vendor/k8s.io/kubernetes/examples/volumes/nfs/nfs-data/run_nfs.sh index fa7b165c0197..e6eb8f946204 100755 --- a/vendor/k8s.io/kubernetes/examples/volumes/nfs/nfs-data/run_nfs.sh +++ b/vendor/k8s.io/kubernetes/examples/volumes/nfs/nfs-data/run_nfs.sh @@ -26,7 +26,7 @@ function start() chmod 644 $i/index.html echo "Serving $i" done - + # start rpcbind if it is not started yet /usr/sbin/rpcinfo 127.0.0.1 > /dev/null; s=$? 
if [ $s -ne 0 ]; then diff --git a/vendor/k8s.io/kubernetes/examples/volumes/portworx/README.md b/vendor/k8s.io/kubernetes/examples/volumes/portworx/README.md index 36e87b4adfec..ca05731d3481 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/portworx/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/portworx/README.md @@ -204,7 +204,7 @@ The following examples assumes that you already have a running Kubernetes cluste ``` bash $ kubectl get pod pvpod NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m + pvpod 1/1 Running 0 48m ``` ### Using Dynamic Provisioning @@ -361,7 +361,7 @@ create Portworx volumes out of band and they will be created automatically. ``` bash $ kubectl get pod pvpod NAME READY STATUS RESTARTS AGE - pvpod 1/1 Running 0 48m + pvpod 1/1 Running 0 48m ``` diff --git a/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd-with-secret.json b/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd-with-secret.json index 30375583d06d..fc41709978c1 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd-with-secret.json +++ b/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd-with-secret.json @@ -22,10 +22,10 @@ "name": "rbdpd", "rbd": { "monitors": [ - "10.16.154.78:6789", + "10.16.154.78:6789", "10.16.154.82:6789", - "10.16.154.83:6789" - ], + "10.16.154.83:6789" + ], "pool": "kube", "image": "foo", "user": "admin", diff --git a/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd.json b/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd.json index 68033bffd880..ab40e3210b8b 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd.json +++ b/vendor/k8s.io/kubernetes/examples/volumes/rbd/rbd.json @@ -22,10 +22,10 @@ "name": "rbdpd", "rbd": { "monitors": [ - "10.16.154.78:6789", + "10.16.154.78:6789", "10.16.154.82:6789", - "10.16.154.83:6789" - ], + "10.16.154.83:6789" + ], "pool": "kube", "image": "foo", "user": "admin", diff --git a/vendor/k8s.io/kubernetes/examples/volumes/rbd/secret/ceph-secret.yaml 
b/vendor/k8s.io/kubernetes/examples/volumes/rbd/secret/ceph-secret.yaml index f717f9005ec7..387dcbd4e934 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/rbd/secret/ceph-secret.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/rbd/secret/ceph-secret.yaml @@ -2,6 +2,6 @@ apiVersion: v1 kind: Secret metadata: name: ceph-secret -type: "kubernetes.io/rbd" +type: "kubernetes.io/rbd" data: key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ== diff --git a/vendor/k8s.io/kubernetes/examples/volumes/scaleio/README.md b/vendor/k8s.io/kubernetes/examples/volumes/scaleio/README.md index 65bec664117b..f5f623d8f34e 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/scaleio/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/scaleio/README.md @@ -42,14 +42,14 @@ This document shows how to configure Kubernetes resources to consume storage fro This document assumes you are familiar with ScaleIO and have a cluster ready to go. If you are *not familiar* with ScaleIO, please review *Learn how to setup a 3-node* [ScaleIO cluster on Vagrant](https://github.com/codedellemc/labs/tree/master/setup-scaleio-vagrant) and see *General instructions on* [setting up ScaleIO](https://www.emc.com/products-solutions/trial-software-download/scaleio.htm) -For this demonstration, ensure the following: +For this demonstration, ensure the following: - The ScaleIO `SDC` component is installed and properly configured on all Kubernetes nodes where deployed pods will consume ScaleIO-backed volumes. - - You have a configured ScaleIO gateway that is accessible from the Kubernetes nodes. + - You have a configured ScaleIO gateway that is accessible from the Kubernetes nodes. ## Deploy Kubernetes Secret for ScaleIO -The ScaleIO plugin uses a Kubernetes Secret object to store the `username` and `password` credentials. +The ScaleIO plugin uses a Kubernetes Secret object to store the `username` and `password` credentials. 
Kuberenetes requires the secret values to be base64-encoded to simply obfuscate (not encrypt) the clear text as shown below. ``` @@ -58,8 +58,8 @@ c2lvdXNlcg== $> echo -n "sc@l3I0" | base64 c2NAbDNJMA== ``` -The previous will generate `base64-encoded` values for the username and password. -Remember to generate the credentials for your own environment and copy them in a secret file similar to the following. +The previous will generate `base64-encoded` values for the username and password. +Remember to generate the credentials for your own environment and copy them in a secret file similar to the following. File: [secret.yaml](secret.yaml) @@ -82,7 +82,7 @@ $ kubectl create -f ./examples/volumes/scaleio/secret.yaml ## Deploying Pods with Persistent Volumes -The example presented in this section shows how the ScaleIO volume plugin can automatically attach, format, and mount an existing ScaleIO volume for pod. +The example presented in this section shows how the ScaleIO volume plugin can automatically attach, format, and mount an existing ScaleIO volume for pod. The Kubernetes ScaleIO volume spec supports the following attributes: | Attribute | Description | @@ -146,7 +146,7 @@ $> kubectl get pod NAME READY STATUS RESTARTS AGE pod-0 1/1 Running 0 33s ``` -Or for more detail, use +Or for more detail, use ``` kubectl describe pod pod-0 ``` @@ -232,7 +232,7 @@ spec: Note the `annotations:` entry which specifies annotation `volume.beta.kubernetes.io/storage-class: sio-small` which references the name of the storage class defined earlier. -Next, we deploy PVC file for the storage class. This step will cause the Kubernetes ScaleIO plugin to create the volume in the storage system. +Next, we deploy PVC file for the storage class. This step will cause the Kubernetes ScaleIO plugin to create the volume in the storage system. 
``` $> kubectl create -f examples/volumes/scaleio/sc-pvc.yaml ``` diff --git a/vendor/k8s.io/kubernetes/examples/volumes/scaleio/sc.yaml b/vendor/k8s.io/kubernetes/examples/volumes/scaleio/sc.yaml index 85de382bf1bc..e0012d95bb8a 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/scaleio/sc.yaml +++ b/vendor/k8s.io/kubernetes/examples/volumes/scaleio/sc.yaml @@ -4,7 +4,7 @@ metadata: name: sio-small provisioner: kubernetes.io/scaleio parameters: - gateway: https://localhost:443/api + gateway: https://localhost:443/api system: scaleio protectionDomain: default secretRef: sio-secret diff --git a/vendor/k8s.io/kubernetes/examples/volumes/storageos/README.md b/vendor/k8s.io/kubernetes/examples/volumes/storageos/README.md index 9aba1435e0ba..ddd9aa106d9d 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/storageos/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/storageos/README.md @@ -26,14 +26,14 @@ The StorageOS provider has been pre-configured to use the StorageOS API defaults API configuration is set by using Kubernetes secrets. The configuration secret supports the following parameters: -* `apiAddress`: The address of the StorageOS API. This is optional and defaults to `tcp://localhost:5705`, which should be correct if the StorageOS container is running using the default settings. +* `apiAddress`: The address of the StorageOS API. This is optional and defaults to `tcp://localhost:5705`, which should be correct if the StorageOS container is running using the default settings. * `apiUsername`: The username to authenticate to the StorageOS API with. * `apiPassword`: The password to authenticate to the StorageOS API with. * `apiVersion`: Optional, string value defaulting to `1`. Only set this if requested in StorageOS documentation. -Mutiple credentials can be used by creating different secrets. +Mutiple credentials can be used by creating different secrets. -For Persistent Volumes, secrets must be created in the Pod namespace. 
Specify the secret name using the `secretName` parameter when attaching existing volumes in Pods or creating new persistent volumes. +For Persistent Volumes, secrets must be created in the Pod namespace. Specify the secret name using the `secretName` parameter when attaching existing volumes in Pods or creating new persistent volumes. For dynamically provisioned volumes using storage classes, the secret can be created in any namespace. Note that you would want this to be an admin-controlled namespace with restricted access to users. Specify the secret namespace as parameter `adminSecretNamespace` and name as parameter `adminSecretName` in storage classes. diff --git a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md index f049acf71bdd..61bfbda74359 100644 --- a/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md +++ b/vendor/k8s.io/kubernetes/examples/volumes/vsphere/README.md @@ -96,10 +96,10 @@ volumePath: "[datastore1] volumes/myDisk" fsType: ext4 ``` - In the above example datastore1 is located in the root folder. If datastore is member of Datastore Cluster or located in sub folder, the folder path needs to be provided in the VolumePath as below. + In the above example datastore1 is located in the root folder. If datastore is member of Datastore Cluster or located in sub folder, the folder path needs to be provided in the VolumePath as below. ```yaml vsphereVolume: - VolumePath: "[DatastoreCluster/datastore1] volumes/myDisk" + VolumePath: "[DatastoreCluster/datastore1] volumes/myDisk" ``` [Download example](vsphere-volume-pv.yaml?raw=true) @@ -240,7 +240,7 @@ parameters: diskformat: zeroedthick datastore: VSANDatastore - ``` + ``` If datastore is member of DataStore Cluster or within some sub folder, the datastore folder path needs to be provided in the datastore parameter as below. 
```yaml @@ -258,7 +258,7 @@ Verifying storage class is created: ``` bash - $ kubectl describe storageclass fast + $ kubectl describe storageclass fast Name: fast IsDefaultClass: No Annotations: diff --git a/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go b/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go index fe6837d13269..e67e683fc443 100644 --- a/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" - serveroptions "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" "k8s.io/kubernetes/federation/cmd/federation-apiserver/app/options" @@ -230,17 +229,8 @@ func NonBlockingRun(s *options.ServerRunOptions, stopCh <-chan struct{}) error { // TODO: Move this to generic api server (Need to move the command line flag). 
if s.Etcd.EnableWatchCache { - glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) - sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) - if userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil { - for resource, size := range userSpecified { - sizes[resource] = size - } - } - s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes) - if err != nil { - return err - } + cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) + cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) } m, err := genericConfig.Complete().New("federation", genericapiserver.EmptyDelegate) diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go index d616cd7beb45..6d34f766bf8e 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/init/init_test.go @@ -1444,7 +1444,11 @@ func tlsHandshake(t *testing.T, sCfg, cCfg *tls.Config) error { } }() - c, err := tls.Dial("tcp", s.Addr().String(), cCfg) + // workaround [::] not working in ipv4 only systems (https://github.com/golang/go/issues/18806) + // TODO: remove with Golang 1.9 with https://go-review.googlesource.com/c/45088/ + addr := strings.TrimPrefix(s.Addr().String(), "[::]") + + c, err := tls.Dial("tcp", addr, cCfg) if err != nil { // Intentionally not serializing the error received because we want to // test for the failure case in the caller test function. 
diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/BUILD b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/BUILD index 19122d412f0d..00f65acdab86 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/BUILD +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/BUILD @@ -16,6 +16,8 @@ go_library( "//federation/client/clientset_generated/federation_clientset:go_default_library", "//pkg/api:go_default_library", "//pkg/apis/rbac:go_default_library", + "//pkg/apis/rbac/v1alpha1:go_default_library", + "//pkg/apis/rbac/v1beta1:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/kubectl/cmd:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", diff --git a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/util.go b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/util.go index 049b65395bbd..5441a0a03b92 100644 --- a/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/util.go +++ b/vendor/k8s.io/kubernetes/federation/pkg/kubefed/util/util.go @@ -31,6 +31,8 @@ import ( fedclient "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" + rbacv1beta1 "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -284,6 +286,15 @@ func getRBACVersion(discoveryclient discovery.CachedDiscoveryInterface) (*schema return nil, fmt.Errorf("Couldn't get clientset to create RBAC roles in the host cluster: %v", err) } + // These are the RBAC versions we can speak + knownVersions := map[schema.GroupVersion]bool{ + rbacv1alpha1.SchemeGroupVersion: true, + rbacv1beta1.SchemeGroupVersion: true, + } + + // This holds any RBAC versions listed in discovery we do not know how to speak + 
unknownVersions := []schema.GroupVersion{} + for _, g := range groupList.Groups { if g.Name == rbac.GroupName { if g.PreferredVersion.GroupVersion != "" { @@ -291,7 +302,9 @@ func getRBACVersion(discoveryclient discovery.CachedDiscoveryInterface) (*schema if err != nil { return nil, err } - return &gv, nil + if knownVersions[gv] { + return &gv, nil + } } for _, version := range g.Versions { if version.GroupVersion != "" { @@ -299,12 +312,20 @@ func getRBACVersion(discoveryclient discovery.CachedDiscoveryInterface) (*schema if err != nil { return nil, err } - return &gv, nil + if knownVersions[gv] { + return &gv, nil + } else { + unknownVersions = append(unknownVersions, gv) + } } } } } + if len(unknownVersions) > 0 { + return nil, &NoRBACAPIError{fmt.Sprintf("%s\nUnknown RBAC API versions: %v", rbacAPINotAvailable, unknownVersions)} + } + return nil, &NoRBACAPIError{rbacAPINotAvailable} } diff --git a/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go b/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go index 817db80c30e3..8f8c3be28c19 100644 --- a/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go +++ b/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/federation/registry/cluster" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" ) type REST struct { @@ -52,6 +53,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &federation.ClusterList{} }, PredicateFunc: cluster.MatchCluster, DefaultQualifiedResource: federation.Resource("clusters"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusters"), CreateStrategy: cluster.Strategy, UpdateStrategy: cluster.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go index 5cabed666e63..af2a0c31eb4c 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/types.go @@ -615,7 +615,7 @@ type EmptyDirVolumeSource struct { // The default is nil which means that the limit is undefined. // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir // +optional - SizeLimit resource.Quantity + SizeLimit *resource.Quantity } // StorageMedium defines ways that storage can be allocated to a volume. diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go index d45d2d5e4a00..f159e3e42b5b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go @@ -2591,14 +2591,16 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) i += copy(dAtA[i:], m.Medium) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n31, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SizeLimit != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n31, err := m.SizeLimit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } - i += n31 return i, nil } @@ -10033,8 +10035,10 @@ func (m *EmptyDirVolumeSource) Size() (n int) { _ = l l = len(m.Medium) n += 1 + l + sovGenerated(uint64(l)) - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12949,7 +12953,7 @@ func (this *EmptyDirVolumeSource) String() string { } s := strings.Join([]string{`&EmptyDirVolumeSource{`, `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, - `SizeLimit:` + strings.Replace(strings.Replace(this.SizeLimit.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", 
this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -19966,6 +19970,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + if m.SizeLimit == nil { + m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -44391,720 +44398,720 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 11429 bytes of a gzipped FileDescriptorProto + // 11430 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x8c, 0x24, 0xc7, 0x75, 0x98, 0x7a, 0x66, 0xf6, 0x63, 0xde, 0x7e, 0xd7, 0xed, 0x1d, 0x97, 0x2b, 0xf2, 0xf6, 0xd8, 0x14, 0xe9, 0x23, 0x79, 0xdc, 0xd3, 0x1d, 0x49, 0x91, 0x12, 0x65, 0x5a, 0xbb, 0x3b, 0xbb, 0x77, 0xeb, 0xfb, 0x1a, 0xd6, 0xec, 0xdd, 0x51, 0x14, 0x43, 0xb2, 0x6f, 0xba, 0x76, 0xb7, 0x79, 0xb3, 0xdd, 0xc3, 0xee, 0x9e, 0xbd, 0x5b, 0x1a, 0x06, 0x6c, 0x45, 0xb0, 0x14, 0x40, 0x49, 0x64, 0x38, - 0x02, 0x02, 0x27, 0x80, 0x02, 0x03, 0x71, 0x94, 0x6f, 0x2b, 0x82, 0x3e, 0x0c, 0xcb, 0x09, 0xe2, - 0x48, 0x8e, 0x1c, 0x24, 0x8e, 0x00, 0x23, 0xb1, 0x02, 0xc3, 0x6b, 0x6b, 0x85, 0xf8, 0x4f, 0x80, - 0xfc, 0x48, 0xfe, 0x6d, 0x3e, 0x10, 0xd4, 0x67, 0x57, 0xf5, 0xf4, 0x6c, 0xf7, 0x2c, 0x6f, 0xd7, - 0x94, 0x90, 0x7f, 0x33, 0xf5, 0x5e, 0xbd, 0xaa, 0xae, 0x8f, 0x57, 0xef, 0xbd, 0x7a, 0xef, 0x15, - 0x9c, 0xbb, 0xfb, 0x52, 0x34, 0xef, 0x05, 0xe7, 0xef, 0x76, 0xee, 0x90, 0xd0, 0x27, 0x31, 0x89, - 0xce, 0xb7, 0xef, 0x6e, 0x9c, 0x77, 0xda, 0xde, 0xf9, 0xed, 0x0b, 0xe7, 0x37, 0x88, 0x4f, 0x42, - 0x27, 0x26, 0xee, 0x7c, 0x3b, 0x0c, 0xe2, 0x00, 0x3d, 0xc2, 0xb1, 0xe7, 0x13, 0xec, 0xf9, 0xf6, - 0xdd, 0x8d, 0x79, 0xa7, 0xed, 0xcd, 0x6f, 0x5f, 0x98, 0x7d, 0x76, 0xc3, 0x8b, 0x37, 0x3b, 0x77, - 0xe6, 0x9b, 0xc1, 0xd6, 0xf9, 0x8d, 0x60, 0x23, 0x38, 0xcf, 0x2a, 0xdd, 0xe9, 0xac, 0xb3, 0x7f, - 0xec, 0x0f, 0xfb, 0xc5, 0x89, 
0xcd, 0x3e, 0x2f, 0x9a, 0x76, 0xda, 0xde, 0x96, 0xd3, 0xdc, 0xf4, - 0x7c, 0x12, 0xee, 0xa8, 0xc6, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x49, 0xd2, 0x5d, 0x38, 0xb0, 0x56, - 0x74, 0x7e, 0x8b, 0xc4, 0x4e, 0x46, 0xc7, 0x67, 0xcf, 0xf7, 0xaa, 0x15, 0x76, 0xfc, 0xd8, 0xdb, - 0xea, 0x6e, 0xe6, 0x63, 0x79, 0x15, 0xa2, 0xe6, 0x26, 0xd9, 0x72, 0xba, 0xea, 0x3d, 0xd7, 0xab, - 0x5e, 0x27, 0xf6, 0x5a, 0xe7, 0x3d, 0x3f, 0x8e, 0xe2, 0x30, 0x5d, 0xc9, 0xfe, 0x63, 0x0b, 0xce, - 0x2c, 0xdc, 0x6e, 0x2c, 0xb7, 0x9c, 0x28, 0xf6, 0x9a, 0x8b, 0xad, 0xa0, 0x79, 0xb7, 0x11, 0x07, - 0x21, 0xb9, 0x15, 0xb4, 0x3a, 0x5b, 0xa4, 0xc1, 0x06, 0x02, 0x9d, 0x83, 0xe1, 0x6d, 0xf6, 0x7f, - 0xb5, 0x36, 0x63, 0x9d, 0xb1, 0xce, 0x56, 0x17, 0x27, 0xbf, 0xbf, 0x3b, 0xf7, 0xa1, 0xbd, 0xdd, - 0xb9, 0xe1, 0x5b, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x24, 0x0c, 0xae, 0x47, 0x6b, 0x3b, 0x6d, 0x32, - 0x53, 0x62, 0xb8, 0xe3, 0x02, 0x77, 0x70, 0xa5, 0x41, 0x4b, 0xb1, 0x80, 0xa2, 0xf3, 0x50, 0x6d, - 0x3b, 0x61, 0xec, 0xc5, 0x5e, 0xe0, 0xcf, 0x94, 0xcf, 0x58, 0x67, 0x07, 0x16, 0xa7, 0x04, 0x6a, - 0xb5, 0x2e, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xe3, 0xde, 0xf0, 0x5b, 0x3b, 0x33, 0x95, - 0x33, 0xd6, 0xd9, 0xe1, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xbb, 0x04, 0xc3, 0x0b, - 0xeb, 0xeb, 0x9e, 0xef, 0xc5, 0x3b, 0xe8, 0x6d, 0x18, 0xf5, 0x03, 0x97, 0xc8, 0xff, 0xec, 0x2b, - 0x46, 0x2e, 0x3e, 0x3d, 0x7f, 0xd0, 0xa2, 0x9a, 0xbf, 0xae, 0xd5, 0x58, 0x9c, 0xdc, 0xdb, 0x9d, - 0x1b, 0xd5, 0x4b, 0xb0, 0x41, 0x11, 0xbd, 0x01, 0x23, 0xed, 0xc0, 0x55, 0x0d, 0x94, 0x58, 0x03, - 0x4f, 0x1d, 0xdc, 0x40, 0x3d, 0xa9, 0xb0, 0x38, 0xb1, 0xb7, 0x3b, 0x37, 0xa2, 0x15, 0x60, 0x9d, - 0x1c, 0x6a, 0xc1, 0x04, 0xfd, 0xeb, 0xc7, 0x9e, 0x6a, 0xa1, 0xcc, 0x5a, 0x78, 0x36, 0xbf, 0x05, - 0xad, 0xd2, 0xe2, 0x89, 0xbd, 0xdd, 0xb9, 0x89, 0x54, 0x21, 0x4e, 0x93, 0xb6, 0xdf, 0x83, 0xf1, - 0x85, 0x38, 0x76, 0x9a, 0x9b, 0xc4, 0xe5, 0xf3, 0x8b, 0x9e, 0x87, 0x8a, 0xef, 0x6c, 0x11, 0x31, - 0xfb, 0x67, 0xc4, 0xb0, 0x57, 0xae, 0x3b, 0x5b, 0x64, 0x7f, 0x77, 
0x6e, 0xf2, 0xa6, 0xef, 0xbd, - 0xdb, 0x11, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x02, 0xb8, 0x64, 0xdb, 0x6b, 0x92, 0xba, - 0x13, 0x6f, 0x8a, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x29, 0x08, 0xd6, 0xb0, 0xec, 0xcf, 0x5a, 0x50, - 0x5d, 0xd8, 0x0e, 0x3c, 0xb7, 0x1e, 0xb8, 0x11, 0xea, 0xc0, 0x44, 0x3b, 0x24, 0xeb, 0x24, 0x54, - 0x45, 0x33, 0xd6, 0x99, 0xf2, 0xd9, 0x91, 0x8b, 0x17, 0x73, 0xbe, 0xdb, 0xac, 0xb4, 0xec, 0xc7, - 0xe1, 0xce, 0xe2, 0x43, 0xa2, 0xe9, 0x89, 0x14, 0x14, 0xa7, 0xdb, 0xb0, 0xbf, 0x5b, 0x82, 0x93, - 0x0b, 0xef, 0x75, 0x42, 0x52, 0xf3, 0xa2, 0xbb, 0xe9, 0xad, 0xe0, 0x7a, 0xd1, 0xdd, 0xeb, 0xc9, - 0x60, 0xa8, 0x35, 0x58, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x88, 0xfe, 0xbe, 0x89, 0x57, - 0xc5, 0xd7, 0x9f, 0x10, 0xc8, 0x23, 0x35, 0x27, 0x76, 0x6a, 0x1c, 0x84, 0x25, 0x0e, 0xba, 0x06, - 0x23, 0x4d, 0xb6, 0x73, 0x37, 0xae, 0x05, 0x2e, 0x61, 0x33, 0x5c, 0x5d, 0x7c, 0x86, 0xa2, 0x2f, - 0x25, 0xc5, 0xfb, 0xbb, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, 0xab, - 0x8d, 0x58, 0x61, 0x94, 0x20, 0x63, 0x13, 0x9e, 0xd5, 0xf6, 0xd4, 0x00, 0xdb, 0x53, 0xa3, 0xd9, - 0xfb, 0x09, 0x5d, 0x80, 0xca, 0x5d, 0xcf, 0x77, 0x67, 0x06, 0x19, 0xad, 0x47, 0xe9, 0xf4, 0x5f, - 0xf1, 0x7c, 0x77, 0x7f, 0x77, 0x6e, 0xca, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x47, 0x96, - 0x18, 0xc6, 0x15, 0xaf, 0x65, 0x72, 0x94, 0x8b, 0x00, 0x11, 0x69, 0x86, 0x24, 0xd6, 0x06, 0x52, - 0xad, 0x8c, 0x86, 0x82, 0x60, 0x0d, 0x8b, 0xf2, 0x8b, 0x68, 0xd3, 0x09, 0xd9, 0x02, 0x13, 0xc3, - 0xa9, 0xf8, 0x45, 0x43, 0x02, 0x70, 0x82, 0x63, 0xf0, 0x8b, 0x72, 0x2e, 0xbf, 0xf8, 0x3d, 0x0b, - 0x86, 0x16, 0x3d, 0xdf, 0xf5, 0xfc, 0x0d, 0xf4, 0x36, 0x0c, 0x53, 0x76, 0xee, 0x3a, 0xb1, 0x23, - 0x58, 0xc5, 0x47, 0xe5, 0x7a, 0xd3, 0xb9, 0xab, 0x5c, 0x71, 0xd1, 0x3c, 0xc5, 0xa6, 0xeb, 0xee, - 0xc6, 0x9d, 0x77, 0x48, 0x33, 0xbe, 0x46, 0x62, 0x27, 0xf9, 0x9c, 0xa4, 0x0c, 0x2b, 0xaa, 0xe8, - 0x26, 0x0c, 0xc6, 0x4e, 0xb8, 0x41, 0x62, 0xc1, 0x29, 0x72, 0xf6, 0x31, 0xa7, 0x81, 0xe9, 0x2a, - 0x25, 
0x7e, 0x93, 0x24, 0x3c, 0x75, 0x8d, 0x11, 0xc1, 0x82, 0x98, 0xdd, 0x84, 0xd1, 0x25, 0xa7, - 0xed, 0xdc, 0xf1, 0x5a, 0x5e, 0xec, 0x91, 0x08, 0xfd, 0x0c, 0x94, 0x1d, 0xd7, 0x65, 0x7b, 0xa6, - 0xba, 0x78, 0x72, 0x6f, 0x77, 0xae, 0xbc, 0xe0, 0xd2, 0x29, 0x03, 0x85, 0xb5, 0x83, 0x29, 0x06, - 0x7a, 0x1a, 0x2a, 0x6e, 0x18, 0xb4, 0x67, 0x4a, 0x0c, 0xf3, 0x14, 0x9d, 0xdd, 0x5a, 0x18, 0xb4, - 0x53, 0xa8, 0x0c, 0xc7, 0xfe, 0x5e, 0x09, 0xd0, 0x12, 0x69, 0x6f, 0xae, 0x34, 0x8c, 0x39, 0x3d, - 0x0b, 0xc3, 0x5b, 0x81, 0xef, 0xc5, 0x41, 0x18, 0x89, 0x06, 0xd9, 0x52, 0xba, 0x26, 0xca, 0xb0, - 0x82, 0xa2, 0x33, 0x50, 0x69, 0x27, 0x1c, 0x61, 0x54, 0x72, 0x13, 0xc6, 0x0b, 0x18, 0x84, 0x62, - 0x74, 0x22, 0x12, 0x8a, 0x2d, 0xa0, 0x30, 0x6e, 0x46, 0x24, 0xc4, 0x0c, 0x92, 0xac, 0x20, 0xba, - 0xb6, 0xc4, 0x02, 0x4f, 0xad, 0x20, 0x0a, 0xc1, 0x1a, 0x16, 0x7a, 0x0b, 0xaa, 0xfc, 0x1f, 0x26, - 0xeb, 0x6c, 0xb5, 0xe7, 0xf2, 0x91, 0xab, 0x41, 0xd3, 0x69, 0xa5, 0x07, 0x7f, 0x8c, 0xad, 0x38, - 0x49, 0x08, 0x27, 0x34, 0x8d, 0x15, 0x37, 0x98, 0xbb, 0xe2, 0xfe, 0xb6, 0x05, 0x68, 0xc9, 0xf3, - 0x5d, 0x12, 0x1e, 0xc3, 0x69, 0xdb, 0xdf, 0x66, 0xf8, 0x13, 0xda, 0xb5, 0x60, 0xab, 0x1d, 0xf8, - 0xc4, 0x8f, 0x97, 0x02, 0xdf, 0xe5, 0x27, 0xf0, 0x27, 0xa0, 0x12, 0xd3, 0xa6, 0x78, 0xb7, 0x9e, - 0x94, 0xd3, 0x42, 0x1b, 0xd8, 0xdf, 0x9d, 0x3b, 0xd5, 0x5d, 0x83, 0x75, 0x81, 0xd5, 0x41, 0x1f, - 0x87, 0xc1, 0x28, 0x76, 0xe2, 0x4e, 0x24, 0x3a, 0xfa, 0x98, 0xec, 0x68, 0x83, 0x95, 0xee, 0xef, - 0xce, 0x4d, 0xa8, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x29, 0x18, 0xda, 0x22, 0x51, 0xe4, 0x6c, - 0x48, 0x9e, 0x38, 0x21, 0xea, 0x0e, 0x5d, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0xe3, 0x30, 0x40, 0xc2, - 0x30, 0x08, 0xc5, 0x8a, 0x18, 0x13, 0x88, 0x03, 0xcb, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x5f, 0x2c, - 0x98, 0x50, 0x7d, 0xe5, 0x6d, 0x1d, 0xc3, 0x96, 0x77, 0x01, 0x9a, 0xf2, 0x03, 0x23, 0xb6, 0xd1, - 0xb4, 0x36, 0xb2, 0x97, 0x5f, 0xf7, 0x80, 0x26, 0x6d, 0xa8, 0xa2, 0x08, 0x6b, 0x74, 0xed, 0x7f, - 0x6b, 0xc1, 0x89, 0xd4, 0xb7, 0x5d, 0xf5, 
0xa2, 0x18, 0xbd, 0xd1, 0xf5, 0x7d, 0xf3, 0xc5, 0xbe, - 0x8f, 0xd6, 0x66, 0x5f, 0xa7, 0xd6, 0x8b, 0x2c, 0xd1, 0xbe, 0x0d, 0xc3, 0x80, 0x17, 0x93, 0x2d, - 0xf9, 0x59, 0xcf, 0x16, 0xfc, 0x2c, 0xde, 0xbf, 0x64, 0x96, 0x56, 0x29, 0x0d, 0xcc, 0x49, 0xd9, - 0xff, 0xcb, 0x82, 0xea, 0x52, 0xe0, 0xaf, 0x7b, 0x1b, 0xd7, 0x9c, 0xf6, 0x31, 0xcc, 0x4f, 0x03, - 0x2a, 0x8c, 0x3a, 0xff, 0x84, 0x0b, 0x79, 0x9f, 0x20, 0x3a, 0x36, 0x4f, 0xcf, 0x3d, 0x2e, 0x5f, - 0x28, 0x36, 0x45, 0x8b, 0x30, 0x23, 0x36, 0xfb, 0x22, 0x54, 0x15, 0x02, 0x9a, 0x84, 0xf2, 0x5d, - 0xc2, 0x85, 0xcf, 0x2a, 0xa6, 0x3f, 0xd1, 0x34, 0x0c, 0x6c, 0x3b, 0xad, 0x8e, 0xd8, 0xbc, 0x98, - 0xff, 0xf9, 0x44, 0xe9, 0x25, 0xcb, 0xfe, 0x1e, 0xdb, 0x81, 0xa2, 0x91, 0x65, 0x7f, 0x5b, 0x30, - 0x87, 0xcf, 0x59, 0x30, 0xdd, 0xca, 0x60, 0x4a, 0x62, 0x4c, 0x0e, 0xc3, 0xce, 0x1e, 0x11, 0xdd, - 0x9e, 0xce, 0x82, 0xe2, 0xcc, 0xd6, 0x28, 0xaf, 0x0f, 0xda, 0x74, 0xc1, 0x39, 0x2d, 0xd6, 0x75, - 0x21, 0x36, 0xdc, 0x10, 0x65, 0x58, 0x41, 0xed, 0xbf, 0xb0, 0x60, 0x5a, 0x7d, 0xc7, 0x15, 0xb2, - 0xd3, 0x20, 0x2d, 0xd2, 0x8c, 0x83, 0xf0, 0x83, 0xf2, 0x25, 0x8f, 0xf2, 0x39, 0xe1, 0x3c, 0x69, - 0x44, 0x10, 0x28, 0x5f, 0x21, 0x3b, 0x7c, 0x82, 0xf4, 0x0f, 0x2d, 0x1f, 0xf8, 0xa1, 0xbf, 0x63, - 0xc1, 0x98, 0xfa, 0xd0, 0x63, 0xd8, 0x72, 0x57, 0xcd, 0x2d, 0xf7, 0x33, 0x05, 0xd7, 0x6b, 0x8f, - 0xcd, 0xf6, 0xb7, 0x4a, 0x94, 0x6d, 0x08, 0x9c, 0x7a, 0x18, 0xd0, 0x41, 0xa2, 0x1c, 0xff, 0x03, - 0x32, 0x4b, 0xfd, 0x7d, 0xec, 0x15, 0xb2, 0xb3, 0x16, 0x50, 0x69, 0x22, 0xfb, 0x63, 0x8d, 0x49, - 0xad, 0x1c, 0x38, 0xa9, 0x7f, 0x50, 0x82, 0x93, 0x6a, 0x58, 0x8c, 0x53, 0xfa, 0xa7, 0x72, 0x60, - 0x2e, 0xc0, 0x88, 0x4b, 0xd6, 0x9d, 0x4e, 0x2b, 0x56, 0x0a, 0xc8, 0x00, 0xd7, 0x4c, 0x6b, 0x49, - 0x31, 0xd6, 0x71, 0xfa, 0x18, 0xcb, 0xaf, 0x8c, 0x30, 0x7e, 0x1e, 0x3b, 0x74, 0xd5, 0x53, 0x09, - 0x4f, 0xd3, 0x28, 0x47, 0x75, 0x8d, 0x52, 0x68, 0x8f, 0x8f, 0xc3, 0x80, 0xb7, 0x45, 0xcf, 0xfc, - 0x92, 0x79, 0x94, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x09, 0x18, 0x6a, 0x06, 
0x5b, 0x5b, 0x8e, - 0xef, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x42, 0xc5, 0x82, 0x25, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x02, - 0x15, 0x27, 0xdc, 0x88, 0x66, 0x2a, 0x0c, 0x67, 0x98, 0xb6, 0xb4, 0x10, 0x6e, 0x44, 0x98, 0x95, - 0x52, 0x59, 0xf2, 0x5e, 0x10, 0xde, 0xf5, 0xfc, 0x8d, 0x9a, 0x17, 0x32, 0xc1, 0x50, 0x93, 0x25, - 0x6f, 0x2b, 0x08, 0xd6, 0xb0, 0x50, 0x1d, 0x06, 0xda, 0x41, 0x18, 0x47, 0x33, 0x83, 0x6c, 0xe0, - 0x9f, 0xc9, 0xdd, 0x7e, 0xfc, 0xbb, 0xeb, 0x41, 0x18, 0x27, 0x9f, 0x42, 0xff, 0x45, 0x98, 0x13, - 0x42, 0x4b, 0x50, 0x26, 0xfe, 0xf6, 0xcc, 0x10, 0xa3, 0xf7, 0x91, 0x83, 0xe9, 0x2d, 0xfb, 0xdb, - 0xb7, 0x9c, 0x30, 0xe1, 0x57, 0xcb, 0xfe, 0x36, 0xa6, 0xb5, 0x51, 0x13, 0xaa, 0xd2, 0x7e, 0x15, - 0xcd, 0x0c, 0x17, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x6e, 0xc7, 0x0b, 0xc9, 0x16, 0xf1, 0xe3, - 0x28, 0x51, 0xac, 0x24, 0x34, 0xc2, 0x09, 0x5d, 0xd4, 0x84, 0x51, 0x2e, 0x7f, 0x5e, 0x0b, 0x3a, - 0x7e, 0x1c, 0xcd, 0x54, 0x59, 0x97, 0x73, 0x8c, 0x1d, 0xb7, 0x92, 0x1a, 0x8b, 0xd3, 0x82, 0xfc, - 0xa8, 0x56, 0x18, 0x61, 0x83, 0x28, 0x7a, 0x03, 0xc6, 0x5a, 0xde, 0x36, 0xf1, 0x49, 0x14, 0xd5, - 0xc3, 0xe0, 0x0e, 0x99, 0x01, 0xf6, 0x35, 0x8f, 0xe7, 0x29, 0xfe, 0xc1, 0x1d, 0xb2, 0x38, 0xb5, - 0xb7, 0x3b, 0x37, 0x76, 0x55, 0xaf, 0x8d, 0x4d, 0x62, 0xe8, 0x2d, 0x18, 0xa7, 0xc2, 0xae, 0x97, - 0x90, 0x1f, 0x29, 0x4e, 0x1e, 0xed, 0xed, 0xce, 0x8d, 0x63, 0xa3, 0x3a, 0x4e, 0x91, 0x43, 0x6b, - 0x50, 0x6d, 0x79, 0xeb, 0xa4, 0xb9, 0xd3, 0x6c, 0x91, 0x99, 0x51, 0x46, 0x3b, 0x67, 0x73, 0x5e, - 0x95, 0xe8, 0x5c, 0xc1, 0x50, 0x7f, 0x71, 0x42, 0x08, 0xdd, 0x82, 0x53, 0x31, 0x09, 0xb7, 0x3c, - 0xdf, 0xa1, 0x9b, 0x4a, 0x48, 0xbf, 0xcc, 0xba, 0x32, 0xc6, 0x56, 0xed, 0x69, 0x31, 0xb0, 0xa7, - 0xd6, 0x32, 0xb1, 0x70, 0x8f, 0xda, 0xe8, 0x06, 0x4c, 0xb0, 0xfd, 0x54, 0xef, 0xb4, 0x5a, 0xf5, - 0xa0, 0xe5, 0x35, 0x77, 0x66, 0xc6, 0x19, 0xc1, 0x27, 0xa4, 0xcd, 0x64, 0xd5, 0x04, 0x53, 0xc5, - 0x30, 0xf9, 0x87, 0xd3, 0xb5, 0x51, 0x0b, 0x26, 0x22, 0xd2, 0xec, 0x84, 0x5e, 0xbc, 0x43, 0xd7, - 0x3e, 0xb9, 0x1f, 
0xcf, 0x4c, 0x14, 0x51, 0x74, 0x1b, 0x66, 0x25, 0x6e, 0xb0, 0x4a, 0x15, 0xe2, - 0x34, 0x69, 0xca, 0x2a, 0xa2, 0xd8, 0xf5, 0xfc, 0x99, 0x49, 0xc6, 0x81, 0xd4, 0xfe, 0x6a, 0xd0, - 0x42, 0xcc, 0x61, 0xcc, 0x7e, 0x40, 0x7f, 0xdc, 0xa0, 0x5c, 0x7a, 0x8a, 0x21, 0x26, 0xf6, 0x03, - 0x09, 0xc0, 0x09, 0x0e, 0x15, 0x0d, 0xe2, 0x78, 0x67, 0x06, 0x31, 0x54, 0xb5, 0xd5, 0xd6, 0xd6, - 0x3e, 0x8d, 0x69, 0x39, 0xba, 0x05, 0x43, 0xc4, 0xdf, 0x5e, 0x09, 0x83, 0xad, 0x99, 0x13, 0x45, - 0x78, 0xc0, 0x32, 0x47, 0xe6, 0xe7, 0x47, 0xa2, 0xc2, 0x88, 0x62, 0x2c, 0x89, 0xa1, 0xfb, 0x30, - 0x93, 0x31, 0x4b, 0x7c, 0x52, 0xa6, 0xd9, 0xa4, 0x7c, 0x52, 0xd4, 0x9d, 0x59, 0xeb, 0x81, 0xb7, - 0x7f, 0x00, 0x0c, 0xf7, 0xa4, 0x6e, 0xdf, 0x81, 0x71, 0xc5, 0xa8, 0xd8, 0x7c, 0xa3, 0x39, 0x18, - 0xa0, 0xbc, 0x58, 0x2a, 0xf4, 0x55, 0x3a, 0xa8, 0x94, 0x45, 0x47, 0x98, 0x97, 0xb3, 0x41, 0xf5, - 0xde, 0x23, 0x8b, 0x3b, 0x31, 0xe1, 0x8a, 0x5d, 0x59, 0x1b, 0x54, 0x09, 0xc0, 0x09, 0x8e, 0xfd, - 0x7f, 0xb9, 0x98, 0x94, 0x70, 0xc3, 0x02, 0x27, 0xc1, 0x39, 0x18, 0xde, 0x0c, 0xa2, 0x98, 0x62, - 0xb3, 0x36, 0x06, 0x12, 0xc1, 0xe8, 0xb2, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0c, 0x63, 0x4d, 0xbd, - 0x01, 0x71, 0x8c, 0x9d, 0x14, 0x55, 0xcc, 0xd6, 0xb1, 0x89, 0x8b, 0x5e, 0x82, 0x61, 0x66, 0x18, - 0x6f, 0x06, 0x2d, 0xa1, 0x42, 0xca, 0x53, 0x79, 0xb8, 0x2e, 0xca, 0xf7, 0xb5, 0xdf, 0x58, 0x61, - 0x53, 0x45, 0x9c, 0x76, 0x61, 0xb5, 0x2e, 0x0e, 0x10, 0xa5, 0x88, 0x5f, 0x66, 0xa5, 0x58, 0x40, - 0xed, 0xdf, 0x2a, 0x69, 0xa3, 0x4c, 0x15, 0x20, 0x82, 0x5e, 0x87, 0xa1, 0x7b, 0x8e, 0x17, 0x7b, - 0xfe, 0x86, 0x90, 0x1e, 0x9e, 0x2b, 0x78, 0x9a, 0xb0, 0xea, 0xb7, 0x79, 0x55, 0x7e, 0xf2, 0x89, - 0x3f, 0x58, 0x12, 0xa4, 0xb4, 0xc3, 0x8e, 0xef, 0x53, 0xda, 0xa5, 0xfe, 0x69, 0x63, 0x5e, 0x95, - 0xd3, 0x16, 0x7f, 0xb0, 0x24, 0x88, 0xd6, 0x01, 0xe4, 0x5a, 0x22, 0xae, 0x30, 0x48, 0x7f, 0xac, - 0x1f, 0xf2, 0x6b, 0xaa, 0xf6, 0xe2, 0x38, 0x3d, 0x6b, 0x93, 0xff, 0x58, 0xa3, 0x6c, 0xc7, 0x4c, - 0x08, 0xeb, 0xee, 0x16, 0xfa, 0x0c, 0xdd, 0xd2, 0x4e, 
0x18, 0x13, 0x77, 0x21, 0x4e, 0xdb, 0xf4, - 0x0f, 0x16, 0xb1, 0xd7, 0xbc, 0x2d, 0xa2, 0x6f, 0x7f, 0x41, 0x04, 0x27, 0xf4, 0xec, 0x6f, 0x95, - 0x61, 0xa6, 0x57, 0x77, 0xe9, 0x92, 0x24, 0xf7, 0xbd, 0x78, 0x89, 0x8a, 0x49, 0x96, 0xb9, 0x24, - 0x97, 0x45, 0x39, 0x56, 0x18, 0x74, 0x6d, 0x44, 0xde, 0x86, 0x54, 0x96, 0x06, 0x92, 0xb5, 0xd1, - 0x60, 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x48, 0x9c, 0x48, 0xdc, 0x87, 0x68, 0x6b, 0x08, 0xb3, 0x52, - 0x2c, 0xa0, 0xba, 0x41, 0xa4, 0x92, 0x63, 0x10, 0x31, 0x86, 0x68, 0xe0, 0xc1, 0x0e, 0x11, 0x7a, - 0x13, 0x60, 0xdd, 0xf3, 0xbd, 0x68, 0x93, 0x51, 0x1f, 0xec, 0x9b, 0xba, 0x12, 0xb2, 0x56, 0x14, - 0x15, 0xac, 0x51, 0x44, 0x2f, 0xc0, 0x88, 0xda, 0x9e, 0xab, 0xb5, 0x99, 0x21, 0xd3, 0x86, 0x9e, - 0xf0, 0xaa, 0x1a, 0xd6, 0xf1, 0xec, 0x77, 0xd2, 0xeb, 0x45, 0xec, 0x0a, 0x6d, 0x7c, 0xad, 0xa2, - 0xe3, 0x5b, 0x3a, 0x78, 0x7c, 0xed, 0xff, 0x5c, 0x86, 0x09, 0xa3, 0xb1, 0x4e, 0x54, 0x80, 0xa3, - 0xbd, 0x4a, 0x0f, 0x2c, 0x27, 0x26, 0x62, 0x4f, 0x9e, 0xeb, 0x67, 0xd3, 0xe8, 0xc7, 0x1b, 0xdd, - 0x0b, 0x9c, 0x12, 0xda, 0x84, 0x6a, 0xcb, 0x89, 0x98, 0x49, 0x85, 0x88, 0xbd, 0xd8, 0x1f, 0xd9, - 0x44, 0xfd, 0x70, 0xa2, 0x58, 0x3b, 0x3d, 0x78, 0x2b, 0x09, 0x71, 0x7a, 0xda, 0x52, 0x61, 0x47, - 0x5e, 0xc2, 0xa9, 0xee, 0x50, 0x89, 0x68, 0x07, 0x73, 0x18, 0x7a, 0x09, 0x46, 0x43, 0xc2, 0x56, - 0xca, 0x12, 0x95, 0xe7, 0xd8, 0xd2, 0x1b, 0x48, 0x04, 0x3f, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xc8, - 0xfd, 0x83, 0x07, 0xc8, 0xfd, 0x4f, 0xc1, 0x10, 0xfb, 0xa1, 0x56, 0x85, 0x9a, 0xa1, 0x55, 0x5e, - 0x8c, 0x25, 0x3c, 0xbd, 0x88, 0x86, 0x0b, 0x2e, 0xa2, 0xa7, 0x61, 0xbc, 0xe6, 0x90, 0xad, 0xc0, - 0x5f, 0xf6, 0xdd, 0x76, 0xe0, 0xf9, 0x31, 0x9a, 0x81, 0x0a, 0x3b, 0x4f, 0xf8, 0x7e, 0xaf, 0x50, - 0x0a, 0xb8, 0x42, 0x65, 0x77, 0xfb, 0x4f, 0x4a, 0x30, 0x56, 0x23, 0x2d, 0x12, 0x13, 0xae, 0xf7, - 0x44, 0x68, 0x05, 0xd0, 0x46, 0xe8, 0x34, 0x49, 0x9d, 0x84, 0x5e, 0xe0, 0x36, 0x48, 0x33, 0xf0, - 0xd9, 0xdd, 0x15, 0x3d, 0x20, 0x4f, 0xed, 0xed, 0xce, 0xa1, 0x4b, 0x5d, 0x50, 0x9c, 0x51, 
0x03, - 0xb9, 0x30, 0xd6, 0x0e, 0x89, 0x61, 0x37, 0xb4, 0xf2, 0x45, 0x8d, 0xba, 0x5e, 0x85, 0x4b, 0xc3, - 0x46, 0x11, 0x36, 0x89, 0xa2, 0x4f, 0xc1, 0x64, 0x10, 0xb6, 0x37, 0x1d, 0xbf, 0x46, 0xda, 0xc4, - 0x77, 0xa9, 0x0a, 0x20, 0xac, 0x1d, 0xd3, 0x7b, 0xbb, 0x73, 0x93, 0x37, 0x52, 0x30, 0xdc, 0x85, - 0x8d, 0x5e, 0x87, 0xa9, 0x76, 0x18, 0xb4, 0x9d, 0x0d, 0xb6, 0x64, 0x84, 0xb4, 0xc2, 0x79, 0xd3, - 0xb9, 0xbd, 0xdd, 0xb9, 0xa9, 0x7a, 0x1a, 0xb8, 0xbf, 0x3b, 0x77, 0x82, 0x0d, 0x19, 0x2d, 0x49, - 0x80, 0xb8, 0x9b, 0x8c, 0xfd, 0x2e, 0x9c, 0xac, 0x05, 0xf7, 0xfc, 0x7b, 0x4e, 0xe8, 0x2e, 0xd4, - 0x57, 0x35, 0xe3, 0xc4, 0x6b, 0x52, 0xf9, 0xe5, 0x77, 0x82, 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, - 0x1d, 0x2b, 0x5e, 0x8b, 0xf4, 0x30, 0x87, 0xfc, 0xe3, 0x92, 0xd1, 0x66, 0x82, 0xaf, 0xee, 0x2e, - 0xac, 0x9e, 0x77, 0x17, 0x9f, 0x81, 0xe1, 0x75, 0x8f, 0xb4, 0x5c, 0x4c, 0xd6, 0xc5, 0x6c, 0x5d, - 0x28, 0x72, 0xb9, 0xb3, 0x42, 0xeb, 0x48, 0xeb, 0x18, 0x57, 0xa2, 0x57, 0x04, 0x19, 0xac, 0x08, - 0xa2, 0x0e, 0x4c, 0x4a, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, 0xae, 0x98, 0x9a, 0x67, 0x36, 0xc3, - 0xa6, 0x17, 0xa7, 0x08, 0xe2, 0xae, 0x26, 0xa8, 0xfe, 0xbc, 0x45, 0x8f, 0xba, 0x0a, 0x5b, 0xfa, - 0x4c, 0x7f, 0x66, 0xa6, 0x00, 0x56, 0x6a, 0xff, 0xa6, 0x05, 0x0f, 0x75, 0x8d, 0x96, 0xb0, 0x93, - 0x1c, 0xd9, 0x1c, 0xa5, 0x8d, 0x15, 0xa5, 0x7c, 0x63, 0x85, 0xfd, 0x5b, 0x16, 0x4c, 0x2f, 0x6f, - 0xb5, 0xe3, 0x9d, 0x9a, 0x67, 0xde, 0xb9, 0xbc, 0x08, 0x83, 0x5b, 0xc4, 0xf5, 0x3a, 0x5b, 0x62, - 0x5e, 0xe7, 0xe4, 0xc1, 0x70, 0x8d, 0x95, 0xee, 0xef, 0xce, 0x8d, 0x35, 0xe2, 0x20, 0x74, 0x36, - 0x08, 0x2f, 0xc0, 0x02, 0x9d, 0x5d, 0x29, 0x79, 0xef, 0x91, 0xab, 0xde, 0x96, 0x27, 0xaf, 0xf2, - 0x0e, 0x34, 0xf2, 0xcd, 0xcb, 0xa1, 0x9d, 0x7f, 0xb5, 0xe3, 0xf8, 0xb1, 0x17, 0xef, 0x98, 0xf2, - 0x32, 0x23, 0x84, 0x13, 0x9a, 0xf6, 0x8f, 0x2c, 0x98, 0x90, 0x1c, 0x68, 0xc1, 0x75, 0x43, 0x12, - 0x45, 0x68, 0x16, 0x4a, 0x5e, 0x5b, 0xf4, 0x14, 0x44, 0xed, 0xd2, 0x6a, 0x1d, 0x97, 0xbc, 0x36, - 0x7a, 0x1d, 0xaa, 0xfc, 0x2e, 
0x30, 0x59, 0x7e, 0x7d, 0xde, 0x2d, 0x32, 0xed, 0x73, 0x4d, 0xd2, - 0xc0, 0x09, 0x39, 0x29, 0x87, 0xb3, 0xb3, 0xad, 0x6c, 0xde, 0x4c, 0x5d, 0x16, 0xe5, 0x58, 0x61, - 0xa0, 0xb3, 0x30, 0xec, 0x07, 0x2e, 0xbf, 0xae, 0xe5, 0x9c, 0x80, 0x2d, 0xea, 0xeb, 0xa2, 0x0c, - 0x2b, 0xa8, 0xfd, 0x45, 0x0b, 0x46, 0xe5, 0x37, 0x16, 0x54, 0x09, 0xe8, 0x36, 0x4c, 0xd4, 0x81, - 0x64, 0x1b, 0x52, 0x91, 0x9e, 0x41, 0x0c, 0x49, 0xbe, 0xdc, 0x8f, 0x24, 0x6f, 0xff, 0x76, 0x09, - 0xc6, 0x65, 0x77, 0x1a, 0x9d, 0x3b, 0x11, 0xa1, 0x82, 0x4e, 0xd5, 0xe1, 0x83, 0x4f, 0xe4, 0x4a, - 0x7e, 0x36, 0x4f, 0xdb, 0x33, 0xe6, 0x2c, 0x99, 0xe5, 0x05, 0x49, 0x07, 0x27, 0x24, 0xd1, 0x36, - 0x4c, 0xf9, 0x41, 0xcc, 0x0e, 0x50, 0x05, 0x2f, 0x76, 0x97, 0x92, 0x6e, 0xe7, 0x61, 0xd1, 0xce, - 0xd4, 0xf5, 0x34, 0x3d, 0xdc, 0xdd, 0x04, 0xba, 0x21, 0xad, 0x58, 0x65, 0xd6, 0xd6, 0xd3, 0xc5, - 0xda, 0xea, 0x6d, 0xc4, 0xb2, 0x7f, 0xdf, 0x82, 0xaa, 0x44, 0x3b, 0x8e, 0x4b, 0xb5, 0xdb, 0x30, - 0x14, 0xb1, 0x29, 0x92, 0xc3, 0x75, 0xae, 0xd8, 0x27, 0xf0, 0x79, 0x4d, 0xa4, 0x06, 0xfe, 0x3f, - 0xc2, 0x92, 0x1a, 0x33, 0xe7, 0xab, 0x0f, 0xf9, 0xc0, 0x99, 0xf3, 0x55, 0xcf, 0x7a, 0xdf, 0x9d, - 0x8d, 0x19, 0xf6, 0x06, 0x2a, 0xfa, 0xb6, 0x43, 0xb2, 0xee, 0xdd, 0x4f, 0x8b, 0xbe, 0x75, 0x56, - 0x8a, 0x05, 0x14, 0xad, 0xc3, 0x68, 0x53, 0x1a, 0xbc, 0x13, 0x16, 0xf2, 0xd1, 0x82, 0xb7, 0x0b, - 0xea, 0xa2, 0x8a, 0xfb, 0x4b, 0x2d, 0x69, 0x94, 0xb0, 0x41, 0x97, 0xf2, 0xa9, 0xe4, 0x2e, 0xbe, - 0x5c, 0xd0, 0x34, 0x14, 0x92, 0x38, 0x69, 0xa1, 0xe7, 0x35, 0xbc, 0xfd, 0x55, 0x0b, 0x06, 0xb9, - 0x85, 0xb4, 0x98, 0x99, 0x59, 0xbb, 0x82, 0x4b, 0xc6, 0xf3, 0x16, 0x2d, 0x14, 0x37, 0x72, 0xe8, - 0x36, 0x54, 0xd9, 0x0f, 0x66, 0xed, 0x29, 0x17, 0x71, 0x1e, 0xe3, 0xed, 0xeb, 0x5d, 0xbd, 0x25, - 0x09, 0xe0, 0x84, 0x96, 0xfd, 0x9d, 0x32, 0x65, 0x7d, 0x09, 0xaa, 0x21, 0x3d, 0x58, 0xc7, 0x21, - 0x3d, 0x94, 0x8e, 0x5e, 0x7a, 0x78, 0x17, 0x26, 0x9a, 0xda, 0x15, 0x60, 0x32, 0xe3, 0x17, 0x0b, - 0x2e, 0x2b, 0xed, 0xde, 0x90, 0x5b, 0x04, 0x97, 0x4c, 0x72, 0x38, 
0x4d, 0x1f, 0x11, 0x18, 0xe5, - 0xeb, 0x41, 0xb4, 0x57, 0x61, 0xed, 0x9d, 0x2f, 0xb2, 0xc2, 0xf4, 0xc6, 0xd8, 0x2a, 0x6e, 0x68, - 0x84, 0xb0, 0x41, 0xd6, 0xfe, 0xf5, 0x01, 0x18, 0x58, 0xde, 0x26, 0x7e, 0x7c, 0x0c, 0xac, 0x6e, - 0x0b, 0xc6, 0x3d, 0x7f, 0x3b, 0x68, 0x6d, 0x13, 0x97, 0xc3, 0x0f, 0x77, 0xbc, 0x9f, 0x12, 0x8d, - 0x8c, 0xaf, 0x1a, 0xc4, 0x70, 0x8a, 0xf8, 0x51, 0xd8, 0x22, 0x5e, 0x85, 0x41, 0xbe, 0x32, 0x84, - 0x21, 0x22, 0xe7, 0xc6, 0x80, 0x0d, 0xac, 0xd8, 0x41, 0x89, 0xc5, 0x84, 0x5f, 0x56, 0x08, 0x42, - 0xe8, 0x1d, 0x18, 0x5f, 0xf7, 0xc2, 0x28, 0x5e, 0xf3, 0xb6, 0xa8, 0x0e, 0xb9, 0xd5, 0x3e, 0x84, - 0x15, 0x42, 0x8d, 0xc8, 0x8a, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x0d, 0x18, 0xa3, 0x4a, 0x70, 0xd2, - 0xd4, 0x50, 0xdf, 0x4d, 0x29, 0x23, 0xe4, 0x55, 0x9d, 0x10, 0x36, 0xe9, 0x52, 0x96, 0xd4, 0x64, - 0x4a, 0xf3, 0x30, 0x93, 0x6e, 0x14, 0x4b, 0xe2, 0xda, 0x32, 0x87, 0x51, 0xce, 0xc6, 0x7c, 0x71, - 0xaa, 0x26, 0x67, 0x4b, 0x3c, 0x6e, 0xec, 0xaf, 0xd3, 0xb3, 0x98, 0x8e, 0xe1, 0x31, 0x1c, 0x5f, - 0x97, 0xcd, 0xe3, 0xeb, 0xf1, 0x02, 0x33, 0xdb, 0xe3, 0xe8, 0x7a, 0x1b, 0x46, 0xb4, 0x89, 0x47, - 0xe7, 0xa1, 0xda, 0x94, 0xee, 0x22, 0x82, 0x8b, 0x2b, 0x51, 0x4a, 0xf9, 0x91, 0xe0, 0x04, 0x87, - 0x8e, 0x0b, 0x15, 0x41, 0xd3, 0xce, 0x65, 0x54, 0x40, 0xc5, 0x0c, 0x62, 0x3f, 0x07, 0xb0, 0x7c, - 0x9f, 0x34, 0x17, 0xb8, 0x12, 0xa9, 0xdd, 0x20, 0x5a, 0xbd, 0x6f, 0x10, 0xed, 0xaf, 0x59, 0x30, - 0xbe, 0xb2, 0x64, 0x28, 0x0d, 0xf3, 0x00, 0x5c, 0x36, 0xbe, 0x7d, 0xfb, 0xba, 0xb4, 0x90, 0x73, - 0x33, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0x7a, 0x18, 0xca, 0xad, 0x8e, 0x2f, 0x44, 0xd6, 0xa1, 0xbd, - 0xdd, 0xb9, 0xf2, 0xd5, 0x8e, 0x8f, 0x69, 0x99, 0xe6, 0xc5, 0x55, 0x2e, 0xec, 0xc5, 0x95, 0xef, - 0x02, 0xfd, 0xe5, 0x32, 0x4c, 0xae, 0xb4, 0xc8, 0x7d, 0xa3, 0xd7, 0x4f, 0xc2, 0xa0, 0x1b, 0x7a, - 0xdb, 0x24, 0x4c, 0x0b, 0x02, 0x35, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0x63, 0xd9, 0x5b, 0xdd, 0x07, - 0xf9, 0xd1, 0x39, 0xd5, 0xe5, 0x7e, 0x33, 0x5a, 0x87, 0x21, 0x7e, 0xe3, 0x1c, 0xcd, 0x0c, 0xb0, - 0xa5, 
0xf8, 0xf2, 0xc1, 0x9d, 0x49, 0x8f, 0xcf, 0xbc, 0xb0, 0xe0, 0x70, 0x97, 0x1e, 0xc5, 0xcb, - 0x44, 0x29, 0x96, 0xc4, 0x67, 0x3f, 0x01, 0xa3, 0x3a, 0x66, 0x5f, 0xbe, 0x3d, 0x7f, 0xd5, 0x82, - 0x13, 0x2b, 0xad, 0xa0, 0x79, 0x37, 0xe5, 0xf9, 0xf7, 0x02, 0x8c, 0xd0, 0xcd, 0x14, 0x19, 0x6e, - 0xb1, 0x86, 0xcb, 0xb0, 0x00, 0x61, 0x1d, 0x4f, 0xab, 0x76, 0xf3, 0xe6, 0x6a, 0x2d, 0xcb, 0xd3, - 0x58, 0x80, 0xb0, 0x8e, 0x67, 0xff, 0xa1, 0x05, 0x8f, 0x5e, 0x5a, 0x5a, 0xae, 0x93, 0x30, 0xf2, - 0xa2, 0x98, 0xf8, 0x71, 0x97, 0xb3, 0x33, 0x95, 0x19, 0x5d, 0xad, 0x2b, 0x89, 0xcc, 0x58, 0x63, - 0xbd, 0x10, 0xd0, 0x0f, 0x8a, 0xc7, 0xff, 0x57, 0x2d, 0x38, 0x71, 0xc9, 0x8b, 0x31, 0x69, 0x07, - 0x69, 0x67, 0xe3, 0x90, 0xb4, 0x83, 0xc8, 0x8b, 0x83, 0x70, 0x27, 0xed, 0x6c, 0x8c, 0x15, 0x04, - 0x6b, 0x58, 0xbc, 0xe5, 0x6d, 0x2f, 0xa2, 0x3d, 0x2d, 0x99, 0xaa, 0x2e, 0x16, 0xe5, 0x58, 0x61, - 0xd0, 0x0f, 0x73, 0xbd, 0x90, 0x89, 0x0c, 0x3b, 0x62, 0x07, 0xab, 0x0f, 0xab, 0x49, 0x00, 0x4e, - 0x70, 0xec, 0xbf, 0x6b, 0xc1, 0xc9, 0x4b, 0xad, 0x4e, 0x14, 0x93, 0x70, 0x3d, 0x32, 0x3a, 0xfb, - 0x1c, 0x54, 0x89, 0x14, 0xee, 0x45, 0x5f, 0xd5, 0xa1, 0xa1, 0xa4, 0x7e, 0xee, 0xe9, 0xac, 0xf0, - 0x0a, 0x38, 0xd4, 0xf6, 0xe7, 0xfe, 0xf9, 0xbb, 0x25, 0x18, 0xbb, 0xbc, 0xb6, 0x56, 0xbf, 0x44, - 0x62, 0xc1, 0x25, 0xf3, 0xcd, 0x5e, 0x58, 0xd3, 0xc8, 0x0f, 0x12, 0x7e, 0x3a, 0xb1, 0xd7, 0x9a, - 0xe7, 0xd1, 0x28, 0xf3, 0xab, 0x7e, 0x7c, 0x23, 0x6c, 0xc4, 0xa1, 0xe7, 0x6f, 0x64, 0xea, 0xf0, - 0x92, 0x97, 0x97, 0x7b, 0xf1, 0x72, 0xf4, 0x1c, 0x0c, 0xb2, 0x70, 0x18, 0x29, 0x7c, 0x7c, 0x58, - 0xc9, 0x09, 0xac, 0x74, 0x7f, 0x77, 0xae, 0x7a, 0x13, 0xaf, 0xf2, 0x3f, 0x58, 0xa0, 0xa2, 0xb7, - 0x60, 0x64, 0x33, 0x8e, 0xdb, 0x97, 0x89, 0xe3, 0x92, 0x50, 0xf2, 0x89, 0xb3, 0x07, 0xf3, 0x09, - 0x3a, 0x1c, 0xbc, 0x42, 0xb2, 0xb5, 0x92, 0xb2, 0x08, 0xeb, 0x14, 0xed, 0x06, 0x40, 0x02, 0x7b, - 0x40, 0x3a, 0x88, 0xfd, 0xcb, 0x25, 0x18, 0xba, 0xec, 0xf8, 0x6e, 0x8b, 0x84, 0x68, 0x05, 0x2a, - 0xe4, 0x3e, 0x69, 0x8a, 0x83, 0x3c, 0xa7, 
0xeb, 0xc9, 0x61, 0xc7, 0x2d, 0x77, 0xf4, 0x3f, 0x66, - 0xf5, 0x11, 0x86, 0x21, 0xda, 0xef, 0x4b, 0xca, 0x0f, 0xfd, 0x99, 0xfc, 0x51, 0x50, 0x8b, 0x82, - 0x9f, 0x94, 0xa2, 0x08, 0x4b, 0x42, 0xcc, 0x02, 0xd5, 0x6c, 0x37, 0x28, 0x7b, 0x8b, 0x8b, 0x69, - 0x76, 0x6b, 0x4b, 0x75, 0x8e, 0x2e, 0xe8, 0x72, 0x0b, 0x94, 0x2c, 0xc4, 0x09, 0x39, 0x7b, 0x0d, - 0xaa, 0x74, 0xf2, 0x17, 0x5a, 0x9e, 0x73, 0xb0, 0x19, 0xec, 0x19, 0xa8, 0x4a, 0x43, 0x54, 0x24, - 0x9c, 0xda, 0x19, 0x55, 0x69, 0xa7, 0x8a, 0x70, 0x02, 0xb7, 0x5f, 0x82, 0x69, 0x76, 0x8f, 0xec, - 0xc4, 0x9b, 0xc6, 0x5e, 0xcc, 0x5d, 0xf4, 0xf6, 0x37, 0x2a, 0x30, 0xb5, 0xda, 0x58, 0x6a, 0x98, - 0x36, 0xcf, 0x97, 0x60, 0x94, 0x1f, 0xfb, 0x74, 0x29, 0x3b, 0x2d, 0x51, 0x5f, 0xdd, 0x7d, 0xac, - 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x47, 0xa1, 0xec, 0xbd, 0xeb, 0xa7, 0xbd, 0x11, 0x57, 0x5f, 0xbd, - 0x8e, 0x69, 0x39, 0x05, 0x53, 0x09, 0x82, 0xb3, 0x4e, 0x05, 0x56, 0x52, 0xc4, 0x2b, 0x30, 0xee, - 0x45, 0xcd, 0xc8, 0x5b, 0xf5, 0x29, 0x5f, 0x71, 0x9a, 0x72, 0x53, 0x24, 0x22, 0x3f, 0xed, 0xaa, - 0x82, 0xe2, 0x14, 0xb6, 0xc6, 0xc7, 0x07, 0x0a, 0x4b, 0x21, 0xb9, 0x6e, 0xee, 0x54, 0xc0, 0x6a, - 0xb3, 0xaf, 0x8b, 0x98, 0x6f, 0x93, 0x10, 0xb0, 0xf8, 0x07, 0x47, 0x58, 0xc2, 0xd0, 0x25, 0x98, - 0x6a, 0x6e, 0x3a, 0xed, 0x85, 0x4e, 0xbc, 0x59, 0xf3, 0xa2, 0x66, 0xb0, 0x4d, 0xc2, 0x1d, 0x26, - 0x00, 0x0f, 0x27, 0x36, 0x2d, 0x05, 0x58, 0xba, 0xbc, 0x50, 0xa7, 0x98, 0xb8, 0xbb, 0x8e, 0x29, - 0x90, 0xc0, 0x11, 0x08, 0x24, 0x0b, 0x30, 0x21, 0x5b, 0x6d, 0x90, 0x88, 0x1d, 0x11, 0x23, 0xac, - 0x9f, 0x2a, 0xc0, 0x48, 0x14, 0xab, 0x5e, 0xa6, 0xf1, 0xed, 0x77, 0xa0, 0xaa, 0x7c, 0xf1, 0xa4, - 0x0b, 0xaa, 0xd5, 0xc3, 0x05, 0x35, 0x9f, 0xb9, 0x4b, 0xeb, 0x7c, 0x39, 0xd3, 0x3a, 0xff, 0x4f, - 0x2d, 0x48, 0x9c, 0x89, 0x10, 0x86, 0x6a, 0x3b, 0x60, 0x37, 0x79, 0xa1, 0xbc, 0x32, 0x7f, 0x22, - 0x67, 0xcf, 0x73, 0x9e, 0xc3, 0x07, 0xa4, 0x2e, 0xeb, 0xe2, 0x84, 0x0c, 0xba, 0x0a, 0x43, 0xed, - 0x90, 0x34, 0x62, 0x16, 0x3f, 0xd2, 0x07, 0x45, 0xbe, 0x10, 0x78, 0x4d, 0x2c, 
0x49, 0xd8, 0xff, - 0xd2, 0x02, 0xe0, 0x66, 0x70, 0xc7, 0xdf, 0x20, 0xc7, 0xa0, 0x58, 0x5f, 0x87, 0x4a, 0xd4, 0x26, - 0xcd, 0x62, 0x77, 0xb1, 0x49, 0xcf, 0x1a, 0x6d, 0xd2, 0x4c, 0xa6, 0x83, 0xfe, 0xc3, 0x8c, 0x8e, - 0xfd, 0x6d, 0x80, 0xf1, 0x04, 0x8d, 0x2a, 0x37, 0xe8, 0x59, 0x23, 0x70, 0xe2, 0xe1, 0x54, 0xe0, - 0x44, 0x95, 0x61, 0x6b, 0xb1, 0x12, 0x31, 0x94, 0xb7, 0x9c, 0xfb, 0x42, 0x97, 0x7a, 0xa1, 0x68, - 0x87, 0x68, 0x4b, 0xf3, 0xd7, 0x9c, 0xfb, 0x5c, 0x74, 0x7d, 0x46, 0x2e, 0xa4, 0x6b, 0xce, 0xfd, - 0x7d, 0x7e, 0xe3, 0xca, 0xb8, 0x13, 0x55, 0xde, 0x3e, 0xfb, 0x67, 0xc9, 0x7f, 0x76, 0x0c, 0xd1, - 0xe6, 0x58, 0xab, 0x9e, 0x2f, 0x4c, 0xc1, 0x7d, 0xb6, 0xea, 0xf9, 0xe9, 0x56, 0x3d, 0xbf, 0x40, - 0xab, 0x1e, 0xf3, 0x30, 0x1e, 0x12, 0x77, 0x34, 0xcc, 0x3d, 0x73, 0xe4, 0xe2, 0xc7, 0xfb, 0x6a, - 0x5a, 0x5c, 0xf6, 0xf0, 0xe6, 0xcf, 0x4b, 0x79, 0x5d, 0x94, 0xe6, 0x76, 0x41, 0x36, 0x8d, 0xfe, - 0x9e, 0x05, 0xe3, 0xe2, 0x37, 0x26, 0xef, 0x76, 0x48, 0x14, 0x0b, 0xb9, 0xe0, 0x53, 0x87, 0xe9, - 0x8d, 0x20, 0xc1, 0x3b, 0xf5, 0x31, 0xc9, 0x7e, 0x4d, 0x60, 0x6e, 0xdf, 0x52, 0xfd, 0x41, 0xdf, - 0xb6, 0x60, 0x7a, 0xcb, 0xb9, 0xcf, 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xbd, 0x40, 0xb8, 0xa0, 0xae, - 0xf4, 0xbb, 0x4e, 0xba, 0x08, 0xf1, 0xee, 0x4a, 0xef, 0xb2, 0xe9, 0x2c, 0x94, 0xdc, 0x4e, 0x67, - 0xf6, 0x70, 0x76, 0x1d, 0x86, 0xe5, 0xc2, 0xcc, 0xd0, 0x94, 0x6a, 0xba, 0xf8, 0xd3, 0xf7, 0x05, - 0x9a, 0xa6, 0x59, 0xb1, 0x76, 0xc4, 0x52, 0x3c, 0xd2, 0x76, 0xde, 0x81, 0x51, 0x7d, 0xdd, 0x1d, - 0x69, 0x5b, 0xef, 0xc2, 0x89, 0x8c, 0x55, 0x75, 0xa4, 0x4d, 0xde, 0x83, 0x87, 0x7b, 0xae, 0x8f, - 0xa3, 0x6c, 0xd8, 0xfe, 0x5d, 0x4b, 0x67, 0x9d, 0xc7, 0x60, 0xb7, 0xba, 0x66, 0xda, 0xad, 0xce, - 0x16, 0xdd, 0x43, 0x3d, 0x8c, 0x57, 0xeb, 0x7a, 0xf7, 0xe9, 0x91, 0x80, 0xd6, 0x60, 0xb0, 0x45, - 0x4b, 0xe4, 0xb5, 0xe1, 0xb9, 0x7e, 0x76, 0x69, 0x22, 0x81, 0xb1, 0xf2, 0x08, 0x0b, 0x5a, 0xf6, - 0xb7, 0x2d, 0xa8, 0xfc, 0x25, 0x86, 0x75, 0x75, 0x91, 0x16, 0xa9, 0x09, 0xe6, 0xb1, 0x73, 0x6f, - 0xf9, 0x7e, 0x4c, 
0xfc, 0x88, 0x89, 0xf1, 0x99, 0x43, 0xf4, 0x7f, 0x4a, 0x30, 0x42, 0x9b, 0x92, - 0x9e, 0x32, 0x2f, 0xc3, 0x58, 0xcb, 0xb9, 0x43, 0x5a, 0xd2, 0xe6, 0x9e, 0x56, 0x7a, 0xaf, 0xea, - 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0xeb, 0xfa, 0x95, 0x84, 0x10, 0x92, 0x54, 0x65, 0xe3, 0xbe, 0x02, - 0x9b, 0xb8, 0x54, 0xeb, 0xba, 0xe7, 0xc4, 0xcd, 0x4d, 0xa1, 0x10, 0xab, 0xee, 0xde, 0xa6, 0x85, - 0x98, 0xc3, 0xa8, 0xb0, 0x27, 0x57, 0xec, 0x2d, 0x12, 0x32, 0x61, 0x8f, 0x0b, 0xd5, 0x4a, 0xd8, - 0xc3, 0x26, 0x18, 0xa7, 0xf1, 0xd1, 0x27, 0x60, 0x9c, 0x0e, 0x4e, 0xd0, 0x89, 0xa5, 0x1f, 0xd0, - 0x00, 0xf3, 0x03, 0x62, 0x6e, 0xe4, 0x6b, 0x06, 0x04, 0xa7, 0x30, 0x51, 0x1d, 0xa6, 0x3d, 0xbf, - 0xd9, 0xea, 0xb8, 0xe4, 0xa6, 0xef, 0xf9, 0x5e, 0xec, 0x39, 0x2d, 0xef, 0x3d, 0xe2, 0x0a, 0xb1, - 0x5b, 0xb9, 0x6c, 0xad, 0x66, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5b, 0x70, 0xe2, 0x6a, 0xe0, 0xb8, - 0x8b, 0x4e, 0xcb, 0xf1, 0x9b, 0x24, 0x5c, 0xf5, 0x37, 0x72, 0x7d, 0x0a, 0xf4, 0x7b, 0xff, 0x52, - 0xde, 0xbd, 0xbf, 0x1d, 0x02, 0xd2, 0x1b, 0x10, 0x3e, 0x71, 0x6f, 0xc0, 0x90, 0xc7, 0x9b, 0x12, - 0x1b, 0xe1, 0x42, 0x9e, 0x4c, 0xde, 0xd5, 0x47, 0xcd, 0xc7, 0x8b, 0x17, 0x60, 0x49, 0x92, 0x6a, - 0x70, 0x59, 0x42, 0x7c, 0xbe, 0xea, 0x6d, 0xbf, 0x00, 0x53, 0xac, 0x66, 0x9f, 0x8a, 0xdf, 0x5f, - 0xb3, 0x60, 0xe2, 0x7a, 0x2a, 0x00, 0xfa, 0x49, 0x18, 0x8c, 0x48, 0x98, 0x61, 0x59, 0x6d, 0xb0, - 0x52, 0x2c, 0xa0, 0x0f, 0xdc, 0x5a, 0xf3, 0x6b, 0x25, 0xa8, 0x32, 0xa7, 0xec, 0x36, 0x55, 0xe2, - 0x8e, 0x5e, 0x5e, 0xbe, 0x66, 0xc8, 0xcb, 0x39, 0x16, 0x03, 0xd5, 0xb1, 0x5e, 0xe2, 0x32, 0xba, - 0xa9, 0x02, 0x83, 0x0b, 0x19, 0x0b, 0x12, 0x82, 0x3c, 0x78, 0x74, 0xdc, 0x8c, 0x23, 0x96, 0x41, - 0xc3, 0xec, 0x02, 0x5f, 0xe1, 0x7e, 0xe0, 0x2e, 0xf0, 0x55, 0xcf, 0x7a, 0x70, 0xc9, 0xba, 0xd6, - 0x79, 0x76, 0x8e, 0xfc, 0x1c, 0x73, 0xb5, 0x65, 0x7b, 0x58, 0xc5, 0xd7, 0xcf, 0x09, 0xd7, 0x59, - 0x51, 0xba, 0xcf, 0x18, 0x9e, 0xf8, 0xc7, 0xd3, 0x27, 0x24, 0x55, 0xec, 0xcb, 0x30, 0x91, 0x1a, - 0x3a, 0xf4, 0x02, 0x0c, 0xb4, 0x37, 0x9d, 0x88, 0xa4, 
0x9c, 0x9e, 0x06, 0xea, 0xb4, 0x70, 0x7f, - 0x77, 0x6e, 0x5c, 0x55, 0x60, 0x25, 0x98, 0x63, 0xdb, 0x9f, 0x2b, 0x41, 0xe5, 0x7a, 0xe0, 0x1e, - 0xc7, 0x52, 0xbb, 0x6c, 0x2c, 0xb5, 0x27, 0xf3, 0xf3, 0xb5, 0xf4, 0x5c, 0x65, 0xf5, 0xd4, 0x2a, - 0x3b, 0x5b, 0x80, 0xd6, 0xc1, 0x0b, 0x6c, 0x0b, 0x46, 0x58, 0x3e, 0x18, 0xe1, 0x94, 0xf5, 0x9c, - 0xa1, 0xe2, 0xcd, 0xa5, 0x54, 0xbc, 0x09, 0x0d, 0x55, 0x53, 0xf4, 0x9e, 0x82, 0x21, 0xe1, 0x04, - 0x94, 0x76, 0x34, 0x16, 0xb8, 0x58, 0xc2, 0xed, 0x7f, 0x51, 0x06, 0x23, 0xff, 0x0c, 0xfa, 0x7d, - 0x0b, 0xe6, 0x43, 0x1e, 0xb4, 0xe5, 0xd6, 0x3a, 0xa1, 0xe7, 0x6f, 0x34, 0x9a, 0x9b, 0xc4, 0xed, - 0xb4, 0x3c, 0x7f, 0x63, 0x75, 0xc3, 0x0f, 0x54, 0xf1, 0xf2, 0x7d, 0xd2, 0xec, 0x30, 0x9b, 0x7b, - 0xe1, 0xb4, 0x37, 0xea, 0x02, 0xfc, 0xe2, 0xde, 0xee, 0xdc, 0x3c, 0xee, 0xab, 0x15, 0xdc, 0x67, - 0xaf, 0xd0, 0x0f, 0x2d, 0x38, 0xcf, 0x33, 0xb0, 0x14, 0xff, 0x92, 0x42, 0xaa, 0x71, 0x5d, 0x12, - 0x4d, 0xc8, 0xad, 0x91, 0x70, 0x6b, 0xf1, 0x45, 0x31, 0xc8, 0xe7, 0xeb, 0xfd, 0xb5, 0x8a, 0xfb, - 0xed, 0xa6, 0xfd, 0xaf, 0xcb, 0x30, 0x46, 0xc7, 0x33, 0x49, 0xa1, 0xf0, 0x82, 0xb1, 0x4c, 0x1e, - 0x4b, 0x2d, 0x93, 0x29, 0x03, 0xf9, 0xc1, 0x64, 0x4f, 0x88, 0x60, 0xaa, 0xe5, 0x44, 0xf1, 0x65, + 0x02, 0x02, 0x27, 0x80, 0x02, 0x03, 0x71, 0x94, 0xc4, 0xf9, 0x50, 0x04, 0x7d, 0x18, 0x96, 0x13, + 0xc4, 0x91, 0x1c, 0x39, 0x48, 0x1c, 0x01, 0x46, 0x62, 0x05, 0x86, 0xd7, 0xd6, 0x0a, 0xf1, 0x9f, + 0x00, 0xf9, 0x91, 0xfc, 0xdb, 0x7c, 0x20, 0xa8, 0xcf, 0xae, 0xea, 0xe9, 0xd9, 0xee, 0x59, 0xde, + 0xae, 0x29, 0x21, 0xff, 0x66, 0xde, 0x7b, 0xf5, 0xaa, 0xba, 0x3e, 0x5e, 0xbd, 0x7a, 0xf5, 0xde, + 0x2b, 0x38, 0x77, 0xf7, 0xa5, 0x68, 0xde, 0x0b, 0xce, 0xdf, 0xed, 0xdc, 0x21, 0xa1, 0x4f, 0x62, + 0x12, 0x9d, 0x6f, 0xdf, 0xdd, 0x38, 0xef, 0xb4, 0xbd, 0xf3, 0xdb, 0x17, 0xce, 0x6f, 0x10, 0x9f, + 0x84, 0x4e, 0x4c, 0xdc, 0xf9, 0x76, 0x18, 0xc4, 0x01, 0x7a, 0x84, 0x53, 0xcf, 0x27, 0xd4, 0xf3, + 0xed, 0xbb, 0x1b, 0xf3, 0x4e, 0xdb, 0x9b, 0xdf, 0xbe, 0x30, 0xfb, 0xec, 0x86, 0x17, 0x6f, 
0x76, + 0xee, 0xcc, 0x37, 0x83, 0xad, 0xf3, 0x1b, 0xc1, 0x46, 0x70, 0x9e, 0x15, 0xba, 0xd3, 0x59, 0x67, + 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x33, 0x9b, 0x7d, 0x5e, 0x54, 0xed, 0xb4, 0xbd, 0x2d, 0xa7, 0xb9, + 0xe9, 0xf9, 0x24, 0xdc, 0x51, 0x95, 0x87, 0x24, 0x0a, 0x3a, 0x61, 0x93, 0xa4, 0x9b, 0x70, 0x60, + 0xa9, 0xe8, 0xfc, 0x16, 0x89, 0x9d, 0x8c, 0x86, 0xcf, 0x9e, 0xef, 0x55, 0x2a, 0xec, 0xf8, 0xb1, + 0xb7, 0xd5, 0x5d, 0xcd, 0xc7, 0xf2, 0x0a, 0x44, 0xcd, 0x4d, 0xb2, 0xe5, 0x74, 0x95, 0x7b, 0xae, + 0x57, 0xb9, 0x4e, 0xec, 0xb5, 0xce, 0x7b, 0x7e, 0x1c, 0xc5, 0x61, 0xba, 0x90, 0xfd, 0xc7, 0x16, + 0x9c, 0x59, 0xb8, 0xdd, 0x58, 0x6e, 0x39, 0x51, 0xec, 0x35, 0x17, 0x5b, 0x41, 0xf3, 0x6e, 0x23, + 0x0e, 0x42, 0x72, 0x2b, 0x68, 0x75, 0xb6, 0x48, 0x83, 0x75, 0x04, 0x3a, 0x07, 0xc3, 0xdb, 0xec, + 0xff, 0x6a, 0x6d, 0xc6, 0x3a, 0x63, 0x9d, 0xad, 0x2e, 0x4e, 0x7e, 0x7f, 0x77, 0xee, 0x43, 0x7b, + 0xbb, 0x73, 0xc3, 0xb7, 0x04, 0x1c, 0x2b, 0x0a, 0xf4, 0x24, 0x0c, 0xae, 0x47, 0x6b, 0x3b, 0x6d, + 0x32, 0x53, 0x62, 0xb4, 0xe3, 0x82, 0x76, 0x70, 0xa5, 0x41, 0xa1, 0x58, 0x60, 0xd1, 0x79, 0xa8, + 0xb6, 0x9d, 0x30, 0xf6, 0x62, 0x2f, 0xf0, 0x67, 0xca, 0x67, 0xac, 0xb3, 0x03, 0x8b, 0x53, 0x82, + 0xb4, 0x5a, 0x97, 0x08, 0x9c, 0xd0, 0xd0, 0x66, 0x84, 0xc4, 0x71, 0x6f, 0xf8, 0xad, 0x9d, 0x99, + 0xca, 0x19, 0xeb, 0xec, 0x70, 0xd2, 0x0c, 0x2c, 0xe0, 0x58, 0x51, 0xd8, 0xdf, 0x2e, 0xc1, 0xf0, + 0xc2, 0xfa, 0xba, 0xe7, 0x7b, 0xf1, 0x0e, 0x7a, 0x1b, 0x46, 0xfd, 0xc0, 0x25, 0xf2, 0x3f, 0xfb, + 0x8a, 0x91, 0x8b, 0x4f, 0xcf, 0x1f, 0x34, 0xa9, 0xe6, 0xaf, 0x6b, 0x25, 0x16, 0x27, 0xf7, 0x76, + 0xe7, 0x46, 0x75, 0x08, 0x36, 0x38, 0xa2, 0x37, 0x60, 0xa4, 0x1d, 0xb8, 0xaa, 0x82, 0x12, 0xab, + 0xe0, 0xa9, 0x83, 0x2b, 0xa8, 0x27, 0x05, 0x16, 0x27, 0xf6, 0x76, 0xe7, 0x46, 0x34, 0x00, 0xd6, + 0xd9, 0xa1, 0x16, 0x4c, 0xd0, 0xbf, 0x7e, 0xec, 0xa9, 0x1a, 0xca, 0xac, 0x86, 0x67, 0xf3, 0x6b, + 0xd0, 0x0a, 0x2d, 0x9e, 0xd8, 0xdb, 0x9d, 0x9b, 0x48, 0x01, 0x71, 0x9a, 0xb5, 0xfd, 0x1e, 0x8c, + 0x2f, 0xc4, 0xb1, 0xd3, 0xdc, 
0x24, 0x2e, 0x1f, 0x5f, 0xf4, 0x3c, 0x54, 0x7c, 0x67, 0x8b, 0x88, + 0xd1, 0x3f, 0x23, 0xba, 0xbd, 0x72, 0xdd, 0xd9, 0x22, 0xfb, 0xbb, 0x73, 0x93, 0x37, 0x7d, 0xef, + 0xdd, 0x8e, 0x98, 0x33, 0x14, 0x86, 0x19, 0x35, 0xba, 0x08, 0xe0, 0x92, 0x6d, 0xaf, 0x49, 0xea, + 0x4e, 0xbc, 0x29, 0x66, 0x03, 0x12, 0x65, 0xa1, 0xa6, 0x30, 0x58, 0xa3, 0xb2, 0x3f, 0x6b, 0x41, + 0x75, 0x61, 0x3b, 0xf0, 0xdc, 0x7a, 0xe0, 0x46, 0xa8, 0x03, 0x13, 0xed, 0x90, 0xac, 0x93, 0x50, + 0x81, 0x66, 0xac, 0x33, 0xe5, 0xb3, 0x23, 0x17, 0x2f, 0xe6, 0x7c, 0xb7, 0x59, 0x68, 0xd9, 0x8f, + 0xc3, 0x9d, 0xc5, 0x87, 0x44, 0xd5, 0x13, 0x29, 0x2c, 0x4e, 0xd7, 0x61, 0x7f, 0xb7, 0x04, 0x27, + 0x17, 0xde, 0xeb, 0x84, 0xa4, 0xe6, 0x45, 0x77, 0xd3, 0x4b, 0xc1, 0xf5, 0xa2, 0xbb, 0xd7, 0x93, + 0xce, 0x50, 0x73, 0xb0, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x67, 0x61, 0x88, 0xfe, 0xbe, 0x89, 0x57, + 0xc5, 0xd7, 0x9f, 0x10, 0xc4, 0x23, 0x35, 0x27, 0x76, 0x6a, 0x1c, 0x85, 0x25, 0x0d, 0xba, 0x06, + 0x23, 0x4d, 0xb6, 0x72, 0x37, 0xae, 0x05, 0x2e, 0x61, 0x23, 0x5c, 0x5d, 0x7c, 0x86, 0x92, 0x2f, + 0x25, 0xe0, 0xfd, 0xdd, 0xb9, 0x19, 0xde, 0x36, 0xc1, 0x42, 0xc3, 0x61, 0xbd, 0x3c, 0xb2, 0xd5, + 0x42, 0xac, 0x30, 0x4e, 0x90, 0xb1, 0x08, 0xcf, 0x6a, 0x6b, 0x6a, 0x80, 0xad, 0xa9, 0xd1, 0xec, + 0xf5, 0x84, 0x2e, 0x40, 0xe5, 0xae, 0xe7, 0xbb, 0x33, 0x83, 0x8c, 0xd7, 0xa3, 0x74, 0xf8, 0xaf, + 0x78, 0xbe, 0xbb, 0xbf, 0x3b, 0x37, 0x65, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0x91, 0x25, + 0xba, 0x71, 0xc5, 0x6b, 0x99, 0x12, 0xe5, 0x22, 0x40, 0x44, 0x9a, 0x21, 0x89, 0xb5, 0x8e, 0x54, + 0x33, 0xa3, 0xa1, 0x30, 0x58, 0xa3, 0xa2, 0xf2, 0x22, 0xda, 0x74, 0x42, 0x36, 0xc1, 0x44, 0x77, + 0x2a, 0x79, 0xd1, 0x90, 0x08, 0x9c, 0xd0, 0x18, 0xf2, 0xa2, 0x9c, 0x2b, 0x2f, 0x7e, 0xcf, 0x82, + 0xa1, 0x45, 0xcf, 0x77, 0x3d, 0x7f, 0x03, 0xbd, 0x0d, 0xc3, 0x54, 0x9c, 0xbb, 0x4e, 0xec, 0x08, + 0x51, 0xf1, 0x51, 0x39, 0xdf, 0x74, 0xe9, 0x2a, 0x67, 0x5c, 0x34, 0x4f, 0xa9, 0xe9, 0xbc, 0xbb, + 0x71, 0xe7, 0x1d, 0xd2, 0x8c, 0xaf, 0x91, 0xd8, 0x49, 0x3e, 0x27, 
0x81, 0x61, 0xc5, 0x15, 0xdd, + 0x84, 0xc1, 0xd8, 0x09, 0x37, 0x48, 0x2c, 0x24, 0x45, 0xce, 0x3a, 0xe6, 0x3c, 0x30, 0x9d, 0xa5, + 0xc4, 0x6f, 0x92, 0x44, 0xa6, 0xae, 0x31, 0x26, 0x58, 0x30, 0xb3, 0x9b, 0x30, 0xba, 0xe4, 0xb4, + 0x9d, 0x3b, 0x5e, 0xcb, 0x8b, 0x3d, 0x12, 0xa1, 0x9f, 0x81, 0xb2, 0xe3, 0xba, 0x6c, 0xcd, 0x54, + 0x17, 0x4f, 0xee, 0xed, 0xce, 0x95, 0x17, 0x5c, 0x3a, 0x64, 0xa0, 0xa8, 0x76, 0x30, 0xa5, 0x40, + 0x4f, 0x43, 0xc5, 0x0d, 0x83, 0xf6, 0x4c, 0x89, 0x51, 0x9e, 0xa2, 0xa3, 0x5b, 0x0b, 0x83, 0x76, + 0x8a, 0x94, 0xd1, 0xd8, 0xdf, 0x2b, 0x01, 0x5a, 0x22, 0xed, 0xcd, 0x95, 0x86, 0x31, 0xa6, 0x67, + 0x61, 0x78, 0x2b, 0xf0, 0xbd, 0x38, 0x08, 0x23, 0x51, 0x21, 0x9b, 0x4a, 0xd7, 0x04, 0x0c, 0x2b, + 0x2c, 0x3a, 0x03, 0x95, 0x76, 0x22, 0x11, 0x46, 0xa5, 0x34, 0x61, 0xb2, 0x80, 0x61, 0x28, 0x45, + 0x27, 0x22, 0xa1, 0x58, 0x02, 0x8a, 0xe2, 0x66, 0x44, 0x42, 0xcc, 0x30, 0xc9, 0x0c, 0xa2, 0x73, + 0x4b, 0x4c, 0xf0, 0xd4, 0x0c, 0xa2, 0x18, 0xac, 0x51, 0xa1, 0xb7, 0xa0, 0xca, 0xff, 0x61, 0xb2, + 0xce, 0x66, 0x7b, 0xae, 0x1c, 0xb9, 0x1a, 0x34, 0x9d, 0x56, 0xba, 0xf3, 0xc7, 0xd8, 0x8c, 0x93, + 0x8c, 0x70, 0xc2, 0xd3, 0x98, 0x71, 0x83, 0xb9, 0x33, 0xee, 0x6f, 0x5b, 0x80, 0x96, 0x3c, 0xdf, + 0x25, 0xe1, 0x31, 0xec, 0xb6, 0xfd, 0x2d, 0x86, 0x3f, 0xa1, 0x4d, 0x0b, 0xb6, 0xda, 0x81, 0x4f, + 0xfc, 0x78, 0x29, 0xf0, 0x5d, 0xbe, 0x03, 0x7f, 0x02, 0x2a, 0x31, 0xad, 0x8a, 0x37, 0xeb, 0x49, + 0x39, 0x2c, 0xb4, 0x82, 0xfd, 0xdd, 0xb9, 0x53, 0xdd, 0x25, 0x58, 0x13, 0x58, 0x19, 0xf4, 0x71, + 0x18, 0x8c, 0x62, 0x27, 0xee, 0x44, 0xa2, 0xa1, 0x8f, 0xc9, 0x86, 0x36, 0x18, 0x74, 0x7f, 0x77, + 0x6e, 0x42, 0x15, 0xe3, 0x20, 0x2c, 0x0a, 0xa0, 0xa7, 0x60, 0x68, 0x8b, 0x44, 0x91, 0xb3, 0x21, + 0x65, 0xe2, 0x84, 0x28, 0x3b, 0x74, 0x8d, 0x83, 0xb1, 0xc4, 0xa3, 0xc7, 0x61, 0x80, 0x84, 0x61, + 0x10, 0x8a, 0x19, 0x31, 0x26, 0x08, 0x07, 0x96, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x5f, 0x2c, 0x98, + 0x50, 0x6d, 0xe5, 0x75, 0x1d, 0xc3, 0x92, 0x77, 0x01, 0x9a, 0xf2, 0x03, 0x23, 0xb6, 0xd0, 0xb4, + 0x3a, 
0xb2, 0xa7, 0x5f, 0x77, 0x87, 0x26, 0x75, 0x28, 0x50, 0x84, 0x35, 0xbe, 0xf6, 0xbf, 0xb5, + 0xe0, 0x44, 0xea, 0xdb, 0xae, 0x7a, 0x51, 0x8c, 0xde, 0xe8, 0xfa, 0xbe, 0xf9, 0x62, 0xdf, 0x47, + 0x4b, 0xb3, 0xaf, 0x53, 0xf3, 0x45, 0x42, 0xb4, 0x6f, 0xc3, 0x30, 0xe0, 0xc5, 0x64, 0x4b, 0x7e, + 0xd6, 0xb3, 0x05, 0x3f, 0x8b, 0xb7, 0x2f, 0x19, 0xa5, 0x55, 0xca, 0x03, 0x73, 0x56, 0xf6, 0xff, + 0xb2, 0xa0, 0xba, 0x14, 0xf8, 0xeb, 0xde, 0xc6, 0x35, 0xa7, 0x7d, 0x0c, 0xe3, 0xd3, 0x80, 0x0a, + 0xe3, 0xce, 0x3f, 0xe1, 0x42, 0xde, 0x27, 0x88, 0x86, 0xcd, 0xd3, 0x7d, 0x8f, 0xeb, 0x17, 0x4a, + 0x4c, 0x51, 0x10, 0x66, 0xcc, 0x66, 0x5f, 0x84, 0xaa, 0x22, 0x40, 0x93, 0x50, 0xbe, 0x4b, 0xb8, + 0xf2, 0x59, 0xc5, 0xf4, 0x27, 0x9a, 0x86, 0x81, 0x6d, 0xa7, 0xd5, 0x11, 0x8b, 0x17, 0xf3, 0x3f, + 0x9f, 0x28, 0xbd, 0x64, 0xd9, 0xdf, 0x63, 0x2b, 0x50, 0x54, 0xb2, 0xec, 0x6f, 0x0b, 0xe1, 0xf0, + 0x39, 0x0b, 0xa6, 0x5b, 0x19, 0x42, 0x49, 0xf4, 0xc9, 0x61, 0xc4, 0xd9, 0x23, 0xa2, 0xd9, 0xd3, + 0x59, 0x58, 0x9c, 0x59, 0x1b, 0x95, 0xf5, 0x41, 0x9b, 0x4e, 0x38, 0xa7, 0xc5, 0x9a, 0x2e, 0xd4, + 0x86, 0x1b, 0x02, 0x86, 0x15, 0xd6, 0xfe, 0x0b, 0x0b, 0xa6, 0xd5, 0x77, 0x5c, 0x21, 0x3b, 0x0d, + 0xd2, 0x22, 0xcd, 0x38, 0x08, 0x3f, 0x28, 0x5f, 0xf2, 0x28, 0x1f, 0x13, 0x2e, 0x93, 0x46, 0x04, + 0x83, 0xf2, 0x15, 0xb2, 0xc3, 0x07, 0x48, 0xff, 0xd0, 0xf2, 0x81, 0x1f, 0xfa, 0x3b, 0x16, 0x8c, + 0xa9, 0x0f, 0x3d, 0x86, 0x25, 0x77, 0xd5, 0x5c, 0x72, 0x3f, 0x53, 0x70, 0xbe, 0xf6, 0x58, 0x6c, + 0x7f, 0xab, 0x44, 0xc5, 0x86, 0xa0, 0xa9, 0x87, 0x01, 0xed, 0x24, 0x2a, 0xf1, 0x3f, 0x20, 0xa3, + 0xd4, 0xdf, 0xc7, 0x5e, 0x21, 0x3b, 0x6b, 0x01, 0xd5, 0x26, 0xb2, 0x3f, 0xd6, 0x18, 0xd4, 0xca, + 0x81, 0x83, 0xfa, 0x07, 0x25, 0x38, 0xa9, 0xba, 0xc5, 0xd8, 0xa5, 0x7f, 0x2a, 0x3b, 0xe6, 0x02, + 0x8c, 0xb8, 0x64, 0xdd, 0xe9, 0xb4, 0x62, 0x75, 0x00, 0x19, 0xe0, 0x27, 0xd3, 0x5a, 0x02, 0xc6, + 0x3a, 0x4d, 0x1f, 0x7d, 0xf9, 0x95, 0x11, 0x26, 0xcf, 0x63, 0x87, 0xce, 0x7a, 0xaa, 0xe1, 0x69, + 0x27, 0xca, 0x51, 0xfd, 0x44, 0x29, 0x4e, 
0x8f, 0x8f, 0xc3, 0x80, 0xb7, 0x45, 0xf7, 0xfc, 0x92, + 0xb9, 0x95, 0xaf, 0x52, 0x20, 0xe6, 0x38, 0xf4, 0x04, 0x0c, 0x35, 0x83, 0xad, 0x2d, 0xc7, 0x77, + 0x67, 0xca, 0x4c, 0xe7, 0x1c, 0xa1, 0x6a, 0xc1, 0x12, 0x07, 0x61, 0x89, 0x43, 0x8f, 0x40, 0xc5, + 0x09, 0x37, 0xa2, 0x99, 0x0a, 0xa3, 0x19, 0xa6, 0x35, 0x2d, 0x84, 0x1b, 0x11, 0x66, 0x50, 0xaa, + 0x4b, 0xde, 0x0b, 0xc2, 0xbb, 0x9e, 0xbf, 0x51, 0xf3, 0x42, 0xa6, 0x18, 0x6a, 0xba, 0xe4, 0x6d, + 0x85, 0xc1, 0x1a, 0x15, 0xaa, 0xc3, 0x40, 0x3b, 0x08, 0xe3, 0x68, 0x66, 0x90, 0x75, 0xfc, 0x33, + 0xb9, 0xcb, 0x8f, 0x7f, 0x77, 0x3d, 0x08, 0xe3, 0xe4, 0x53, 0xe8, 0xbf, 0x08, 0x73, 0x46, 0x68, + 0x09, 0xca, 0xc4, 0xdf, 0x9e, 0x19, 0x62, 0xfc, 0x3e, 0x72, 0x30, 0xbf, 0x65, 0x7f, 0xfb, 0x96, + 0x13, 0x26, 0xf2, 0x6a, 0xd9, 0xdf, 0xc6, 0xb4, 0x34, 0x6a, 0x42, 0x55, 0xda, 0xaf, 0xa2, 0x99, + 0xe1, 0x22, 0x53, 0x11, 0x0b, 0x72, 0x4c, 0xde, 0xed, 0x78, 0x21, 0xd9, 0x22, 0x7e, 0x1c, 0x25, + 0x07, 0x2b, 0x89, 0x8d, 0x70, 0xc2, 0x17, 0x35, 0x61, 0x94, 0xeb, 0x9f, 0xd7, 0x82, 0x8e, 0x1f, + 0x47, 0x33, 0x55, 0xd6, 0xe4, 0x1c, 0x63, 0xc7, 0xad, 0xa4, 0xc4, 0xe2, 0xb4, 0x60, 0x3f, 0xaa, + 0x01, 0x23, 0x6c, 0x30, 0x45, 0x6f, 0xc0, 0x58, 0xcb, 0xdb, 0x26, 0x3e, 0x89, 0xa2, 0x7a, 0x18, + 0xdc, 0x21, 0x33, 0xc0, 0xbe, 0xe6, 0xf1, 0xbc, 0x83, 0x7f, 0x70, 0x87, 0x2c, 0x4e, 0xed, 0xed, + 0xce, 0x8d, 0x5d, 0xd5, 0x4b, 0x63, 0x93, 0x19, 0x7a, 0x0b, 0xc6, 0xa9, 0xb2, 0xeb, 0x25, 0xec, + 0x47, 0x8a, 0xb3, 0x47, 0x7b, 0xbb, 0x73, 0xe3, 0xd8, 0x28, 0x8e, 0x53, 0xec, 0xd0, 0x1a, 0x54, + 0x5b, 0xde, 0x3a, 0x69, 0xee, 0x34, 0x5b, 0x64, 0x66, 0x94, 0xf1, 0xce, 0x59, 0x9c, 0x57, 0x25, + 0x39, 0x3f, 0x60, 0xa8, 0xbf, 0x38, 0x61, 0x84, 0x6e, 0xc1, 0xa9, 0x98, 0x84, 0x5b, 0x9e, 0xef, + 0xd0, 0x45, 0x25, 0xb4, 0x5f, 0x66, 0x5d, 0x19, 0x63, 0xb3, 0xf6, 0xb4, 0xe8, 0xd8, 0x53, 0x6b, + 0x99, 0x54, 0xb8, 0x47, 0x69, 0x74, 0x03, 0x26, 0xd8, 0x7a, 0xaa, 0x77, 0x5a, 0xad, 0x7a, 0xd0, + 0xf2, 0x9a, 0x3b, 0x33, 0xe3, 0x8c, 0xe1, 0x13, 0xd2, 0x66, 0xb2, 0x6a, 0xa2, 
0xe9, 0xc1, 0x30, + 0xf9, 0x87, 0xd3, 0xa5, 0x51, 0x0b, 0x26, 0x22, 0xd2, 0xec, 0x84, 0x5e, 0xbc, 0x43, 0xe7, 0x3e, + 0xb9, 0x1f, 0xcf, 0x4c, 0x14, 0x39, 0xe8, 0x36, 0xcc, 0x42, 0xdc, 0x60, 0x95, 0x02, 0xe2, 0x34, + 0x6b, 0x2a, 0x2a, 0xa2, 0xd8, 0xf5, 0xfc, 0x99, 0x49, 0x26, 0x81, 0xd4, 0xfa, 0x6a, 0x50, 0x20, + 0xe6, 0x38, 0x66, 0x3f, 0xa0, 0x3f, 0x6e, 0x50, 0x29, 0x3d, 0xc5, 0x08, 0x13, 0xfb, 0x81, 0x44, + 0xe0, 0x84, 0x86, 0xaa, 0x06, 0x71, 0xbc, 0x33, 0x83, 0x18, 0xa9, 0x5a, 0x6a, 0x6b, 0x6b, 0x9f, + 0xc6, 0x14, 0x8e, 0x6e, 0xc1, 0x10, 0xf1, 0xb7, 0x57, 0xc2, 0x60, 0x6b, 0xe6, 0x44, 0x11, 0x19, + 0xb0, 0xcc, 0x89, 0xf9, 0xfe, 0x91, 0x1c, 0x61, 0x04, 0x18, 0x4b, 0x66, 0xe8, 0x3e, 0xcc, 0x64, + 0x8c, 0x12, 0x1f, 0x94, 0x69, 0x36, 0x28, 0x9f, 0x14, 0x65, 0x67, 0xd6, 0x7a, 0xd0, 0xed, 0x1f, + 0x80, 0xc3, 0x3d, 0xb9, 0xdb, 0x77, 0x60, 0x5c, 0x09, 0x2a, 0x36, 0xde, 0x68, 0x0e, 0x06, 0xa8, + 0x2c, 0x96, 0x07, 0xfa, 0x2a, 0xed, 0x54, 0x2a, 0xa2, 0x23, 0xcc, 0xe1, 0xac, 0x53, 0xbd, 0xf7, + 0xc8, 0xe2, 0x4e, 0x4c, 0xf8, 0xc1, 0xae, 0xac, 0x75, 0xaa, 0x44, 0xe0, 0x84, 0xc6, 0xfe, 0xbf, + 0x5c, 0x4d, 0x4a, 0xa4, 0x61, 0x81, 0x9d, 0xe0, 0x1c, 0x0c, 0x6f, 0x06, 0x51, 0x4c, 0xa9, 0x59, + 0x1d, 0x03, 0x89, 0x62, 0x74, 0x59, 0xc0, 0xb1, 0xa2, 0x40, 0x2f, 0xc3, 0x58, 0x53, 0xaf, 0x40, + 0x6c, 0x63, 0x27, 0x45, 0x11, 0xb3, 0x76, 0x6c, 0xd2, 0xa2, 0x97, 0x60, 0x98, 0x19, 0xc6, 0x9b, + 0x41, 0x4b, 0x1c, 0x21, 0xe5, 0xae, 0x3c, 0x5c, 0x17, 0xf0, 0x7d, 0xed, 0x37, 0x56, 0xd4, 0xf4, + 0x20, 0x4e, 0x9b, 0xb0, 0x5a, 0x17, 0x1b, 0x88, 0x3a, 0x88, 0x5f, 0x66, 0x50, 0x2c, 0xb0, 0xf6, + 0x3f, 0x2f, 0x69, 0xbd, 0x4c, 0x0f, 0x40, 0x04, 0xbd, 0x0e, 0x43, 0xf7, 0x1c, 0x2f, 0xf6, 0xfc, + 0x0d, 0xa1, 0x3d, 0x3c, 0x57, 0x70, 0x37, 0x61, 0xc5, 0x6f, 0xf3, 0xa2, 0x7c, 0xe7, 0x13, 0x7f, + 0xb0, 0x64, 0x48, 0x79, 0x87, 0x1d, 0xdf, 0xa7, 0xbc, 0x4b, 0xfd, 0xf3, 0xc6, 0xbc, 0x28, 0xe7, + 0x2d, 0xfe, 0x60, 0xc9, 0x10, 0xad, 0x03, 0xc8, 0xb9, 0x44, 0x5c, 0x61, 0x90, 0xfe, 0x58, 0x3f, + 0xec, 0xd7, 0x54, 
0xe9, 0xc5, 0x71, 0xba, 0xd7, 0x26, 0xff, 0xb1, 0xc6, 0xd9, 0x8e, 0x99, 0x12, + 0xd6, 0xdd, 0x2c, 0xf4, 0x19, 0xba, 0xa4, 0x9d, 0x30, 0x26, 0xee, 0x42, 0x9c, 0xb6, 0xe9, 0x1f, + 0xac, 0x62, 0xaf, 0x79, 0x5b, 0x44, 0x5f, 0xfe, 0x82, 0x09, 0x4e, 0xf8, 0xd9, 0xdf, 0x2a, 0xc3, + 0x4c, 0xaf, 0xe6, 0xd2, 0x29, 0x49, 0xee, 0x7b, 0xf1, 0x12, 0x55, 0x93, 0x2c, 0x73, 0x4a, 0x2e, + 0x0b, 0x38, 0x56, 0x14, 0x74, 0x6e, 0x44, 0xde, 0x86, 0x3c, 0x2c, 0x0d, 0x24, 0x73, 0xa3, 0xc1, + 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x48, 0x9c, 0x48, 0xdc, 0x87, 0x68, 0x73, 0x08, 0x33, 0x28, 0x16, + 0x58, 0xdd, 0x20, 0x52, 0xc9, 0x31, 0x88, 0x18, 0x5d, 0x34, 0xf0, 0x60, 0xbb, 0x08, 0xbd, 0x09, + 0xb0, 0xee, 0xf9, 0x5e, 0xb4, 0xc9, 0xb8, 0x0f, 0xf6, 0xcd, 0x5d, 0x29, 0x59, 0x2b, 0x8a, 0x0b, + 0xd6, 0x38, 0xa2, 0x17, 0x60, 0x44, 0x2d, 0xcf, 0xd5, 0xda, 0xcc, 0x90, 0x69, 0x43, 0x4f, 0x64, + 0x55, 0x0d, 0xeb, 0x74, 0xf6, 0x3b, 0xe9, 0xf9, 0x22, 0x56, 0x85, 0xd6, 0xbf, 0x56, 0xd1, 0xfe, + 0x2d, 0x1d, 0xdc, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0x84, 0x51, 0x59, 0x27, 0x2a, 0x20, 0xd1, 0x5e, + 0xa5, 0x1b, 0x96, 0x13, 0x13, 0xb1, 0x26, 0xcf, 0xf5, 0xb3, 0x68, 0xf4, 0xed, 0x8d, 0xae, 0x05, + 0xce, 0x09, 0x6d, 0x42, 0xb5, 0xe5, 0x44, 0xcc, 0xa4, 0x42, 0xc4, 0x5a, 0xec, 0x8f, 0x6d, 0x72, + 0xfc, 0x70, 0xa2, 0x58, 0xdb, 0x3d, 0x78, 0x2d, 0x09, 0x73, 0xba, 0xdb, 0x52, 0x65, 0x47, 0x5e, + 0xc2, 0xa9, 0xe6, 0x50, 0x8d, 0x68, 0x07, 0x73, 0x1c, 0x7a, 0x09, 0x46, 0x43, 0xc2, 0x66, 0xca, + 0x12, 0xd5, 0xe7, 0xd8, 0xd4, 0x1b, 0x48, 0x14, 0x3f, 0xac, 0xe1, 0xb0, 0x41, 0x99, 0xe8, 0xfd, + 0x83, 0x07, 0xe8, 0xfd, 0x4f, 0xc1, 0x10, 0xfb, 0xa1, 0x66, 0x85, 0x1a, 0xa1, 0x55, 0x0e, 0xc6, + 0x12, 0x9f, 0x9e, 0x44, 0xc3, 0x05, 0x27, 0xd1, 0xd3, 0x30, 0x5e, 0x73, 0xc8, 0x56, 0xe0, 0x2f, + 0xfb, 0x6e, 0x3b, 0xf0, 0xfc, 0x18, 0xcd, 0x40, 0x85, 0xed, 0x27, 0x7c, 0xbd, 0x57, 0x28, 0x07, + 0x5c, 0xa1, 0xba, 0xbb, 0xfd, 0x27, 0x25, 0x18, 0xab, 0x91, 0x16, 0x89, 0x09, 0x3f, 0xf7, 0x44, + 0x68, 0x05, 0xd0, 0x46, 0xe8, 0x34, 0x49, 0x9d, 0x84, 
0x5e, 0xe0, 0x36, 0x48, 0x33, 0xf0, 0xd9, + 0xdd, 0x15, 0xdd, 0x20, 0x4f, 0xed, 0xed, 0xce, 0xa1, 0x4b, 0x5d, 0x58, 0x9c, 0x51, 0x02, 0xb9, + 0x30, 0xd6, 0x0e, 0x89, 0x61, 0x37, 0xb4, 0xf2, 0x55, 0x8d, 0xba, 0x5e, 0x84, 0x6b, 0xc3, 0x06, + 0x08, 0x9b, 0x4c, 0xd1, 0xa7, 0x60, 0x32, 0x08, 0xdb, 0x9b, 0x8e, 0x5f, 0x23, 0x6d, 0xe2, 0xbb, + 0xf4, 0x08, 0x20, 0xac, 0x1d, 0xd3, 0x7b, 0xbb, 0x73, 0x93, 0x37, 0x52, 0x38, 0xdc, 0x45, 0x8d, + 0x5e, 0x87, 0xa9, 0x76, 0x18, 0xb4, 0x9d, 0x0d, 0x36, 0x65, 0x84, 0xb6, 0xc2, 0x65, 0xd3, 0xb9, + 0xbd, 0xdd, 0xb9, 0xa9, 0x7a, 0x1a, 0xb9, 0xbf, 0x3b, 0x77, 0x82, 0x75, 0x19, 0x85, 0x24, 0x48, + 0xdc, 0xcd, 0xc6, 0x7e, 0x17, 0x4e, 0xd6, 0x82, 0x7b, 0xfe, 0x3d, 0x27, 0x74, 0x17, 0xea, 0xab, + 0x9a, 0x71, 0xe2, 0x35, 0x79, 0xf8, 0xe5, 0x77, 0x82, 0x39, 0x3b, 0x9b, 0xc6, 0x83, 0x1f, 0x3b, + 0x56, 0xbc, 0x16, 0xe9, 0x61, 0x0e, 0xf9, 0xc7, 0x25, 0xa3, 0xce, 0x84, 0x5e, 0xdd, 0x5d, 0x58, + 0x3d, 0xef, 0x2e, 0x3e, 0x03, 0xc3, 0xeb, 0x1e, 0x69, 0xb9, 0x98, 0xac, 0x8b, 0xd1, 0xba, 0x50, + 0xe4, 0x72, 0x67, 0x85, 0x96, 0x91, 0xd6, 0x31, 0x7e, 0x88, 0x5e, 0x11, 0x6c, 0xb0, 0x62, 0x88, + 0x3a, 0x30, 0x29, 0xcf, 0x61, 0x12, 0x2b, 0x16, 0xfb, 0x73, 0xc5, 0x8e, 0x79, 0x66, 0x35, 0x6c, + 0x78, 0x71, 0x8a, 0x21, 0xee, 0xaa, 0x82, 0x9e, 0x9f, 0xb7, 0xe8, 0x56, 0x57, 0x61, 0x53, 0x9f, + 0x9d, 0x9f, 0x99, 0x29, 0x80, 0x41, 0xed, 0xdf, 0xb4, 0xe0, 0xa1, 0xae, 0xde, 0x12, 0x76, 0x92, + 0x23, 0x1b, 0xa3, 0xb4, 0xb1, 0xa2, 0x94, 0x6f, 0xac, 0xb0, 0x7f, 0xcb, 0x82, 0xe9, 0xe5, 0xad, + 0x76, 0xbc, 0x53, 0xf3, 0xcc, 0x3b, 0x97, 0x17, 0x61, 0x70, 0x8b, 0xb8, 0x5e, 0x67, 0x4b, 0x8c, + 0xeb, 0x9c, 0xdc, 0x18, 0xae, 0x31, 0xe8, 0xfe, 0xee, 0xdc, 0x58, 0x23, 0x0e, 0x42, 0x67, 0x83, + 0x70, 0x00, 0x16, 0xe4, 0x6c, 0x7b, 0xf5, 0xde, 0x23, 0x57, 0xbd, 0x2d, 0x4f, 0x5e, 0xe5, 0x1d, + 0x68, 0xe4, 0x9b, 0x97, 0x5d, 0x3b, 0xff, 0x6a, 0xc7, 0xf1, 0x63, 0x2f, 0xde, 0x11, 0xd7, 0x49, + 0x92, 0x09, 0x4e, 0xf8, 0xd9, 0x3f, 0xb2, 0x60, 0x42, 0x4a, 0x9f, 0x05, 0xd7, 0x0d, 0x49, 
0x14, + 0xa1, 0x59, 0x28, 0x79, 0x6d, 0xd1, 0x4a, 0x10, 0xad, 0x2c, 0xad, 0xd6, 0x71, 0xc9, 0x6b, 0xa3, + 0xd7, 0xa1, 0xca, 0xef, 0x01, 0x93, 0xa9, 0xd7, 0xe7, 0xbd, 0x22, 0x6b, 0xcb, 0x9a, 0xe4, 0x81, + 0x13, 0x76, 0x52, 0x07, 0x67, 0xfb, 0x5a, 0xd9, 0xbc, 0x95, 0xba, 0x2c, 0xe0, 0x58, 0x51, 0xa0, + 0xb3, 0x30, 0xec, 0x07, 0x2e, 0xbf, 0xaa, 0xe5, 0x52, 0x80, 0x4d, 0xe8, 0xeb, 0x02, 0x86, 0x15, + 0xd6, 0xfe, 0xa2, 0x05, 0xa3, 0xf2, 0x1b, 0x0b, 0x1e, 0x07, 0xe8, 0x12, 0x4c, 0x8e, 0x02, 0xc9, + 0x12, 0xa4, 0xea, 0x3c, 0xc3, 0x18, 0x5a, 0x7c, 0xb9, 0x1f, 0x2d, 0xde, 0xfe, 0xed, 0x12, 0x8c, + 0xcb, 0xe6, 0x34, 0x3a, 0x77, 0x22, 0x42, 0x95, 0x9c, 0xaa, 0xc3, 0x3b, 0x9f, 0xc8, 0x59, 0xfc, + 0x6c, 0xde, 0x49, 0xcf, 0x18, 0xb3, 0x44, 0x89, 0x5a, 0x90, 0x7c, 0x70, 0xc2, 0x12, 0x6d, 0xc3, + 0x94, 0x1f, 0xc4, 0x6c, 0xf3, 0x54, 0xf8, 0x62, 0xf7, 0x28, 0xe9, 0x7a, 0x1e, 0x16, 0xf5, 0x4c, + 0x5d, 0x4f, 0xf3, 0xc3, 0xdd, 0x55, 0xa0, 0x1b, 0xd2, 0x82, 0x55, 0x66, 0x75, 0x3d, 0x5d, 0xac, + 0xae, 0xde, 0x06, 0x2c, 0xfb, 0xf7, 0x2d, 0xa8, 0x4a, 0xb2, 0xe3, 0xb8, 0x50, 0xbb, 0x0d, 0x43, + 0x11, 0x1b, 0x22, 0xd9, 0x5d, 0xe7, 0x8a, 0x7d, 0x02, 0x1f, 0xd7, 0x44, 0x63, 0xe0, 0xff, 0x23, + 0x2c, 0xb9, 0x31, 0x53, 0xbe, 0xfa, 0x90, 0x0f, 0x9c, 0x29, 0x5f, 0xb5, 0xac, 0xf7, 0xbd, 0xd9, + 0x98, 0x61, 0x6b, 0xa0, 0x6a, 0x6f, 0x3b, 0x24, 0xeb, 0xde, 0xfd, 0xb4, 0xda, 0x5b, 0x67, 0x50, + 0x2c, 0xb0, 0x68, 0x1d, 0x46, 0x9b, 0xd2, 0xd8, 0x9d, 0x88, 0x90, 0x8f, 0x16, 0xbc, 0x59, 0x50, + 0x97, 0x54, 0xdc, 0x57, 0x6a, 0x49, 0xe3, 0x84, 0x0d, 0xbe, 0x54, 0x4e, 0x25, 0xf7, 0xf0, 0xe5, + 0x82, 0x66, 0xa1, 0x90, 0xc4, 0x49, 0x0d, 0x3d, 0xaf, 0xe0, 0xed, 0xaf, 0x5a, 0x30, 0xc8, 0xad, + 0xa3, 0xc5, 0x4c, 0xcc, 0xda, 0xf5, 0x5b, 0xd2, 0x9f, 0xb7, 0x28, 0x50, 0xdc, 0xc6, 0xa1, 0xdb, + 0x50, 0x65, 0x3f, 0x98, 0xa5, 0xa7, 0x5c, 0xc4, 0x71, 0x8c, 0xd7, 0xaf, 0x37, 0xf5, 0x96, 0x64, + 0x80, 0x13, 0x5e, 0xf6, 0x77, 0xca, 0x54, 0xf4, 0x25, 0xa4, 0x86, 0xe6, 0x60, 0x1d, 0x87, 0xe6, + 0x50, 0x3a, 0x7a, 0xcd, 0xe1, 
0x5d, 0x98, 0x68, 0x6a, 0xd7, 0x7f, 0xc9, 0x88, 0x5f, 0x2c, 0x38, + 0xad, 0xb4, 0x3b, 0x43, 0x6e, 0x0d, 0x5c, 0x32, 0xd9, 0xe1, 0x34, 0x7f, 0x44, 0x60, 0x94, 0xcf, + 0x07, 0x51, 0x5f, 0x85, 0xd5, 0x77, 0xbe, 0xc8, 0x0c, 0xd3, 0x2b, 0x63, 0xb3, 0xb8, 0xa1, 0x31, + 0xc2, 0x06, 0x5b, 0xfb, 0xd7, 0x07, 0x60, 0x60, 0x79, 0x9b, 0xf8, 0xf1, 0x31, 0x88, 0xba, 0x2d, + 0x18, 0xf7, 0xfc, 0xed, 0xa0, 0xb5, 0x4d, 0x5c, 0x8e, 0x3f, 0xdc, 0xf6, 0x7e, 0x4a, 0x54, 0x32, + 0xbe, 0x6a, 0x30, 0xc3, 0x29, 0xe6, 0x47, 0x61, 0x87, 0x78, 0x15, 0x06, 0xf9, 0xcc, 0x10, 0x46, + 0x88, 0x9c, 0xdb, 0x02, 0xd6, 0xb1, 0x62, 0x05, 0x25, 0xd6, 0x12, 0x7e, 0x51, 0x21, 0x18, 0xa1, + 0x77, 0x60, 0x7c, 0xdd, 0x0b, 0xa3, 0x78, 0xcd, 0xdb, 0xa2, 0xe7, 0xc7, 0xad, 0xf6, 0x21, 0x2c, + 0x10, 0xaa, 0x47, 0x56, 0x0c, 0x4e, 0x38, 0xc5, 0x19, 0x6d, 0xc0, 0x18, 0x3d, 0x00, 0x27, 0x55, + 0x0d, 0xf5, 0x5d, 0x95, 0x32, 0x40, 0x5e, 0xd5, 0x19, 0x61, 0x93, 0x2f, 0x15, 0x49, 0x4d, 0x76, + 0x60, 0x1e, 0x66, 0xda, 0x8d, 0x12, 0x49, 0xfc, 0xa4, 0xcc, 0x71, 0x54, 0xb2, 0x31, 0x3f, 0x9c, + 0xaa, 0x29, 0xd9, 0x12, 0x6f, 0x1b, 0xfb, 0xeb, 0x74, 0x2f, 0xa6, 0x7d, 0x78, 0x0c, 0xdb, 0xd7, + 0x65, 0x73, 0xfb, 0x7a, 0xbc, 0xc0, 0xc8, 0xf6, 0xd8, 0xba, 0xde, 0x86, 0x11, 0x6d, 0xe0, 0xd1, + 0x79, 0xa8, 0x36, 0xa5, 0xab, 0x88, 0x90, 0xe2, 0x4a, 0x95, 0x52, 0x3e, 0x24, 0x38, 0xa1, 0xa1, + 0xfd, 0x42, 0x55, 0xd0, 0xb4, 0x63, 0x19, 0x55, 0x50, 0x31, 0xc3, 0xd8, 0xcf, 0x01, 0x2c, 0xdf, + 0x27, 0xcd, 0x05, 0x7e, 0x80, 0xd4, 0x6e, 0x0f, 0xad, 0xde, 0xb7, 0x87, 0xf6, 0xd7, 0x2c, 0x18, + 0x5f, 0x59, 0x32, 0x0e, 0x0c, 0xf3, 0x00, 0x5c, 0x37, 0xbe, 0x7d, 0xfb, 0xba, 0xb4, 0x8e, 0x73, + 0x13, 0xa6, 0x82, 0x62, 0x8d, 0x02, 0x3d, 0x0c, 0xe5, 0x56, 0xc7, 0x17, 0x2a, 0xeb, 0xd0, 0xde, + 0xee, 0x5c, 0xf9, 0x6a, 0xc7, 0xc7, 0x14, 0xa6, 0x79, 0x70, 0x95, 0x0b, 0x7b, 0x70, 0xe5, 0xbb, + 0x3f, 0x7f, 0xb9, 0x0c, 0x93, 0x2b, 0x2d, 0x72, 0xdf, 0x68, 0xf5, 0x93, 0x30, 0xe8, 0x86, 0xde, + 0x36, 0x09, 0xd3, 0x8a, 0x40, 0x8d, 0x41, 0xb1, 0xc0, 0x16, 0x76, 
0x2a, 0x7b, 0xab, 0x7b, 0x23, + 0x3f, 0x3a, 0x87, 0xba, 0xdc, 0x6f, 0x46, 0xeb, 0x30, 0xc4, 0x6f, 0x9b, 0xa3, 0x99, 0x01, 0x36, + 0x15, 0x5f, 0x3e, 0xb8, 0x31, 0xe9, 0xfe, 0x99, 0x17, 0xd6, 0x1b, 0xee, 0xce, 0xa3, 0x64, 0x99, + 0x80, 0x62, 0xc9, 0x7c, 0xf6, 0x13, 0x30, 0xaa, 0x53, 0xf6, 0xe5, 0xd7, 0xf3, 0x57, 0x2d, 0x38, + 0xb1, 0xd2, 0x0a, 0x9a, 0x77, 0x53, 0x5e, 0x7f, 0x2f, 0xc0, 0x08, 0x5d, 0x4c, 0x91, 0xe1, 0x12, + 0x6b, 0xb8, 0x0b, 0x0b, 0x14, 0xd6, 0xe9, 0xb4, 0x62, 0x37, 0x6f, 0xae, 0xd6, 0xb2, 0xbc, 0x8c, + 0x05, 0x0a, 0xeb, 0x74, 0xf6, 0x1f, 0x5a, 0xf0, 0xe8, 0xa5, 0xa5, 0xe5, 0x3a, 0x09, 0x23, 0x2f, + 0x8a, 0x89, 0x1f, 0x77, 0x39, 0x3a, 0x53, 0x9d, 0xd1, 0xd5, 0x9a, 0x92, 0xe8, 0x8c, 0x35, 0xd6, + 0x0a, 0x81, 0xfd, 0xa0, 0x78, 0xfb, 0x7f, 0xd5, 0x82, 0x13, 0x97, 0xbc, 0x18, 0x93, 0x76, 0x90, + 0x76, 0x34, 0x0e, 0x49, 0x3b, 0x88, 0xbc, 0x38, 0x08, 0x77, 0xd2, 0x8e, 0xc6, 0x58, 0x61, 0xb0, + 0x46, 0xc5, 0x6b, 0xde, 0xf6, 0x22, 0xda, 0xd2, 0x92, 0x79, 0xd4, 0xc5, 0x02, 0x8e, 0x15, 0x05, + 0xfd, 0x30, 0xd7, 0x0b, 0x99, 0xca, 0xb0, 0x23, 0x56, 0xb0, 0xfa, 0xb0, 0x9a, 0x44, 0xe0, 0x84, + 0xc6, 0xfe, 0xbb, 0x16, 0x9c, 0xbc, 0xd4, 0xea, 0x44, 0x31, 0x09, 0xd7, 0x23, 0xa3, 0xb1, 0xcf, + 0x41, 0x95, 0x48, 0xe5, 0x5e, 0xb4, 0x55, 0x6d, 0x1a, 0x4a, 0xeb, 0xe7, 0x5e, 0xce, 0x8a, 0xae, + 0x80, 0x33, 0x6d, 0x7f, 0xae, 0x9f, 0xbf, 0x5b, 0x82, 0xb1, 0xcb, 0x6b, 0x6b, 0xf5, 0x4b, 0x24, + 0x16, 0x52, 0x32, 0xdf, 0xe4, 0x85, 0xb5, 0x13, 0xf9, 0x41, 0xca, 0x4f, 0x27, 0xf6, 0x5a, 0xf3, + 0x3c, 0x12, 0x65, 0x7e, 0xd5, 0x8f, 0x6f, 0x84, 0x8d, 0x38, 0xf4, 0xfc, 0x8d, 0xcc, 0x33, 0xbc, + 0x94, 0xe5, 0xe5, 0x5e, 0xb2, 0x1c, 0x3d, 0x07, 0x83, 0x2c, 0x14, 0x46, 0x2a, 0x1f, 0x1f, 0x56, + 0x7a, 0x02, 0x83, 0xee, 0xef, 0xce, 0x55, 0x6f, 0xe2, 0x55, 0xfe, 0x07, 0x0b, 0x52, 0xf4, 0x16, + 0x8c, 0x6c, 0xc6, 0x71, 0xfb, 0x32, 0x71, 0x5c, 0x12, 0x4a, 0x39, 0x71, 0xf6, 0x60, 0x39, 0x41, + 0xbb, 0x83, 0x17, 0x48, 0x96, 0x56, 0x02, 0x8b, 0xb0, 0xce, 0xd1, 0x6e, 0x00, 0x24, 0xb8, 0x07, + 0x74, 
0x06, 0xb1, 0x7f, 0xb9, 0x04, 0x43, 0x97, 0x1d, 0xdf, 0x6d, 0x91, 0x10, 0xad, 0x40, 0x85, + 0xdc, 0x27, 0x4d, 0xb1, 0x91, 0xe7, 0x34, 0x3d, 0xd9, 0xec, 0xb8, 0xd5, 0x8e, 0xfe, 0xc7, 0xac, + 0x3c, 0xc2, 0x30, 0x44, 0xdb, 0x7d, 0x49, 0xf9, 0xa0, 0x3f, 0x93, 0xdf, 0x0b, 0x6a, 0x52, 0xf0, + 0x9d, 0x52, 0x80, 0xb0, 0x64, 0xc4, 0x2c, 0x50, 0xcd, 0x76, 0x83, 0x8a, 0xb7, 0xb8, 0xd8, 0xc9, + 0x6e, 0x6d, 0xa9, 0xce, 0xc9, 0x05, 0x5f, 0x6e, 0x81, 0x92, 0x40, 0x9c, 0xb0, 0xb3, 0xd7, 0xa0, + 0x4a, 0x07, 0x7f, 0xa1, 0xe5, 0x39, 0x07, 0x9b, 0xc1, 0x9e, 0x81, 0xaa, 0x34, 0x44, 0x45, 0xc2, + 0xa1, 0x9d, 0x71, 0x95, 0x76, 0xaa, 0x08, 0x27, 0x78, 0xfb, 0x25, 0x98, 0x66, 0x77, 0xc8, 0x4e, + 0xbc, 0x69, 0xac, 0xc5, 0xdc, 0x49, 0x6f, 0x7f, 0xa3, 0x02, 0x53, 0xab, 0x8d, 0xa5, 0x86, 0x69, + 0xef, 0x7c, 0x09, 0x46, 0xf9, 0xb6, 0x4f, 0xa7, 0xb2, 0xd3, 0x12, 0xe5, 0xd5, 0xbd, 0xc7, 0x9a, + 0x86, 0xc3, 0x06, 0x25, 0x7a, 0x14, 0xca, 0xde, 0xbb, 0x7e, 0xda, 0x13, 0x71, 0xf5, 0xd5, 0xeb, + 0x98, 0xc2, 0x29, 0x9a, 0x6a, 0x10, 0x5c, 0x74, 0x2a, 0xb4, 0xd2, 0x22, 0x5e, 0x81, 0x71, 0x2f, + 0x6a, 0x46, 0xde, 0xaa, 0x4f, 0xe5, 0x8a, 0xd3, 0x94, 0x8b, 0x22, 0x51, 0xf9, 0x69, 0x53, 0x15, + 0x16, 0xa7, 0xa8, 0x35, 0x39, 0x3e, 0x50, 0x58, 0x0b, 0xc9, 0x75, 0x71, 0xa7, 0x0a, 0x56, 0x9b, + 0x7d, 0x5d, 0xc4, 0xfc, 0x9a, 0x84, 0x82, 0xc5, 0x3f, 0x38, 0xc2, 0x12, 0x87, 0x2e, 0xc1, 0x54, + 0x73, 0xd3, 0x69, 0x2f, 0x74, 0xe2, 0xcd, 0x9a, 0x17, 0x35, 0x83, 0x6d, 0x12, 0xee, 0x30, 0x05, + 0x78, 0x38, 0xb1, 0x69, 0x29, 0xc4, 0xd2, 0xe5, 0x85, 0x3a, 0xa5, 0xc4, 0xdd, 0x65, 0x4c, 0x85, + 0x04, 0x8e, 0x40, 0x21, 0x59, 0x80, 0x09, 0x59, 0x6b, 0x83, 0x44, 0x6c, 0x8b, 0x18, 0x61, 0xed, + 0x54, 0xc1, 0x45, 0x02, 0xac, 0x5a, 0x99, 0xa6, 0xb7, 0xdf, 0x81, 0xaa, 0xf2, 0xc3, 0x93, 0xee, + 0xa7, 0x56, 0x0f, 0xf7, 0xd3, 0x7c, 0xe1, 0x2e, 0x2d, 0xf3, 0xe5, 0x4c, 0xcb, 0xfc, 0x3f, 0xb1, + 0x20, 0x71, 0x24, 0x42, 0x18, 0xaa, 0xed, 0x80, 0xdd, 0xe2, 0x85, 0xf2, 0xba, 0xfc, 0x89, 0x9c, + 0x35, 0xcf, 0x65, 0x0e, 0xef, 0x90, 0xba, 
0x2c, 0x8b, 0x13, 0x36, 0xe8, 0x2a, 0x0c, 0xb5, 0x43, + 0xd2, 0x88, 0x59, 0xec, 0x48, 0x1f, 0x1c, 0xf9, 0x44, 0xe0, 0x25, 0xb1, 0x64, 0x61, 0xff, 0x4b, + 0x0b, 0x80, 0x9b, 0xc1, 0x1d, 0x7f, 0x83, 0x1c, 0xc3, 0xc1, 0xfa, 0x3a, 0x54, 0xa2, 0x36, 0x69, + 0x16, 0xbb, 0x87, 0x4d, 0x5a, 0xd6, 0x68, 0x93, 0x66, 0x32, 0x1c, 0xf4, 0x1f, 0x66, 0x7c, 0xec, + 0x6f, 0x03, 0x8c, 0x27, 0x64, 0xf4, 0x70, 0x83, 0x9e, 0x35, 0x82, 0x26, 0x1e, 0x4e, 0x05, 0x4d, + 0x54, 0x19, 0xb5, 0x16, 0x27, 0x11, 0x43, 0x79, 0xcb, 0xb9, 0x2f, 0xce, 0x52, 0x2f, 0x14, 0x6d, + 0x10, 0xad, 0x69, 0xfe, 0x9a, 0x73, 0x9f, 0xab, 0xae, 0xcf, 0xc8, 0x89, 0x74, 0xcd, 0xb9, 0xbf, + 0xcf, 0x6f, 0x5b, 0x99, 0x74, 0xa2, 0x87, 0xb7, 0xcf, 0xfe, 0x59, 0xf2, 0x9f, 0x6d, 0x43, 0xb4, + 0x3a, 0x56, 0xab, 0xe7, 0x0b, 0x53, 0x70, 0x9f, 0xb5, 0x7a, 0x7e, 0xba, 0x56, 0xcf, 0x2f, 0x50, + 0xab, 0xc7, 0xbc, 0x8b, 0x87, 0xc4, 0xfd, 0x0c, 0x73, 0xcd, 0x1c, 0xb9, 0xf8, 0xf1, 0xbe, 0xaa, + 0x16, 0x17, 0x3d, 0xbc, 0xfa, 0xf3, 0x52, 0x5f, 0x17, 0xd0, 0xdc, 0x26, 0xc8, 0xaa, 0xd1, 0xdf, + 0xb3, 0x60, 0x5c, 0xfc, 0xc6, 0xe4, 0xdd, 0x0e, 0x89, 0x62, 0xa1, 0x17, 0x7c, 0xea, 0x30, 0xad, + 0x11, 0x2c, 0x78, 0xa3, 0x3e, 0x26, 0xc5, 0xaf, 0x89, 0xcc, 0x6d, 0x5b, 0xaa, 0x3d, 0xe8, 0xdb, + 0x16, 0x4c, 0x6f, 0x39, 0xf7, 0x79, 0x8d, 0x1c, 0x86, 0x9d, 0xd8, 0x0b, 0x84, 0xfb, 0xe9, 0x4a, + 0xbf, 0xf3, 0xa4, 0x8b, 0x11, 0x6f, 0xae, 0xf4, 0x2c, 0x9b, 0xce, 0x22, 0xc9, 0x6d, 0x74, 0x66, + 0x0b, 0x67, 0xd7, 0x61, 0x58, 0x4e, 0xcc, 0x8c, 0x93, 0x52, 0x4d, 0x57, 0x7f, 0xfa, 0xbe, 0x3c, + 0xd3, 0x4e, 0x56, 0xac, 0x1e, 0x31, 0x15, 0x8f, 0xb4, 0x9e, 0x77, 0x60, 0x54, 0x9f, 0x77, 0x47, + 0x5a, 0xd7, 0xbb, 0x70, 0x22, 0x63, 0x56, 0x1d, 0x69, 0x95, 0xf7, 0xe0, 0xe1, 0x9e, 0xf3, 0xe3, + 0x28, 0x2b, 0xb6, 0x7f, 0xd7, 0xd2, 0x45, 0xe7, 0x31, 0xd8, 0xad, 0xae, 0x99, 0x76, 0xab, 0xb3, + 0x45, 0xd7, 0x50, 0x0f, 0xe3, 0xd5, 0xba, 0xde, 0x7c, 0xba, 0x25, 0xa0, 0x35, 0x18, 0x6c, 0x51, + 0x88, 0xbc, 0x36, 0x3c, 0xd7, 0xcf, 0x2a, 0x4d, 0x34, 0x30, 0x06, 0x8f, 0xb0, 
0xe0, 0x65, 0x7f, + 0xdb, 0x82, 0xca, 0x5f, 0x62, 0x48, 0x57, 0x17, 0x6b, 0x91, 0x96, 0x60, 0x1e, 0x3b, 0xf7, 0x96, + 0xef, 0xc7, 0xc4, 0x8f, 0x98, 0x1a, 0x9f, 0xd9, 0x45, 0xff, 0xa7, 0x04, 0x23, 0xb4, 0x2a, 0xe9, + 0x25, 0xf3, 0x32, 0x8c, 0xb5, 0x9c, 0x3b, 0xa4, 0x25, 0x6d, 0xee, 0xe9, 0x43, 0xef, 0x55, 0x1d, + 0x89, 0x4d, 0x5a, 0x5a, 0x78, 0x5d, 0xbf, 0x92, 0x10, 0x4a, 0x92, 0x2a, 0x6c, 0xdc, 0x57, 0x60, + 0x93, 0x96, 0x9e, 0xba, 0xee, 0x39, 0x71, 0x73, 0x53, 0x1c, 0x88, 0x55, 0x73, 0x6f, 0x53, 0x20, + 0xe6, 0x38, 0xaa, 0xec, 0xc9, 0x19, 0x7b, 0x8b, 0x84, 0x4c, 0xd9, 0xe3, 0x4a, 0xb5, 0x52, 0xf6, + 0xb0, 0x89, 0xc6, 0x69, 0x7a, 0xf4, 0x09, 0x18, 0xa7, 0x9d, 0x13, 0x74, 0x62, 0xe9, 0x03, 0x34, + 0xc0, 0x7c, 0x80, 0x98, 0x0b, 0xf9, 0x9a, 0x81, 0xc1, 0x29, 0x4a, 0x54, 0x87, 0x69, 0xcf, 0x6f, + 0xb6, 0x3a, 0x2e, 0xb9, 0xe9, 0x7b, 0xbe, 0x17, 0x7b, 0x4e, 0xcb, 0x7b, 0x8f, 0xb8, 0x42, 0xed, + 0x56, 0xee, 0x5a, 0xab, 0x19, 0x34, 0x38, 0xb3, 0xa4, 0xfd, 0x16, 0x9c, 0xb8, 0x1a, 0x38, 0xee, + 0xa2, 0xd3, 0x72, 0xfc, 0x26, 0x09, 0x57, 0xfd, 0x8d, 0x5c, 0x9f, 0x02, 0xfd, 0xde, 0xbf, 0x94, + 0x77, 0xef, 0x6f, 0x87, 0x80, 0xf4, 0x0a, 0x84, 0x3f, 0xdc, 0x1b, 0x30, 0xe4, 0xf1, 0xaa, 0xc4, + 0x42, 0xb8, 0x90, 0xa7, 0x93, 0x77, 0xb5, 0x51, 0xf3, 0xef, 0xe2, 0x00, 0x2c, 0x59, 0xd2, 0x13, + 0x5c, 0x96, 0x12, 0x9f, 0x7f, 0xf4, 0xb6, 0x5f, 0x80, 0x29, 0x56, 0xb2, 0xcf, 0x83, 0xdf, 0x5f, + 0xb3, 0x60, 0xe2, 0x7a, 0x2a, 0xf8, 0xf9, 0x49, 0x18, 0x8c, 0x48, 0x98, 0x61, 0x59, 0x6d, 0x30, + 0x28, 0x16, 0xd8, 0x07, 0x6e, 0xad, 0xf9, 0xb5, 0x12, 0x54, 0x99, 0x43, 0x76, 0x9b, 0x1e, 0xe2, + 0x8e, 0x5e, 0x5f, 0xbe, 0x66, 0xe8, 0xcb, 0x39, 0x16, 0x03, 0xd5, 0xb0, 0x5e, 0xea, 0x32, 0xba, + 0xa9, 0x82, 0x82, 0x0b, 0x19, 0x0b, 0x12, 0x86, 0x3c, 0x70, 0x74, 0xdc, 0x8c, 0x21, 0x96, 0x01, + 0xc3, 0xec, 0x02, 0x5f, 0xd1, 0x7e, 0xe0, 0x2e, 0xf0, 0x55, 0xcb, 0x7a, 0x48, 0xc9, 0xba, 0xd6, + 0x78, 0xb6, 0x8f, 0xfc, 0x1c, 0x73, 0xb3, 0x65, 0x6b, 0x58, 0xc5, 0xd6, 0xcf, 0x09, 0xb7, 0x59, + 0x01, 0xdd, 0x67, 
0x02, 0x4f, 0xfc, 0xe3, 0xa9, 0x13, 0x92, 0x22, 0xf6, 0x65, 0x98, 0x48, 0x75, + 0x1d, 0x7a, 0x01, 0x06, 0xda, 0x9b, 0x4e, 0x44, 0x52, 0x0e, 0x4f, 0x03, 0x75, 0x0a, 0xdc, 0xdf, + 0x9d, 0x1b, 0x57, 0x05, 0x18, 0x04, 0x73, 0x6a, 0xfb, 0x73, 0x25, 0xa8, 0x5c, 0x0f, 0xdc, 0xe3, + 0x98, 0x6a, 0x97, 0x8d, 0xa9, 0xf6, 0x64, 0x7e, 0xae, 0x96, 0x9e, 0xb3, 0xac, 0x9e, 0x9a, 0x65, + 0x67, 0x0b, 0xf0, 0x3a, 0x78, 0x82, 0x6d, 0xc1, 0x08, 0xcb, 0x05, 0x23, 0x9c, 0xb2, 0x9e, 0x33, + 0x8e, 0x78, 0x73, 0xa9, 0x23, 0xde, 0x84, 0x46, 0xaa, 0x1d, 0xf4, 0x9e, 0x82, 0x21, 0xe1, 0x04, + 0x94, 0x76, 0x32, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x7f, 0x51, 0x06, 0x23, 0xf7, 0x0c, 0xfa, 0x7d, + 0x0b, 0xe6, 0x43, 0x1e, 0xb0, 0xe5, 0xd6, 0x3a, 0xa1, 0xe7, 0x6f, 0x34, 0x9a, 0x9b, 0xc4, 0xed, + 0xb4, 0x3c, 0x7f, 0x63, 0x75, 0xc3, 0x0f, 0x14, 0x78, 0xf9, 0x3e, 0x69, 0x76, 0x98, 0xcd, 0xbd, + 0x70, 0xca, 0x1b, 0x75, 0x01, 0x7e, 0x71, 0x6f, 0x77, 0x6e, 0x1e, 0xf7, 0x55, 0x0b, 0xee, 0xb3, + 0x55, 0xe8, 0x87, 0x16, 0x9c, 0xe7, 0xd9, 0x57, 0x8a, 0x7f, 0x49, 0xa1, 0xa3, 0x71, 0x5d, 0x32, + 0x4d, 0xd8, 0xad, 0x91, 0x70, 0x6b, 0xf1, 0x45, 0xd1, 0xc9, 0xe7, 0xeb, 0xfd, 0xd5, 0x8a, 0xfb, + 0x6d, 0xa6, 0xfd, 0xaf, 0xcb, 0x30, 0x46, 0xfb, 0x33, 0x49, 0x9f, 0xf0, 0x82, 0x31, 0x4d, 0x1e, + 0x4b, 0x4d, 0x93, 0x29, 0x83, 0xf8, 0xc1, 0x64, 0x4e, 0x88, 0x60, 0xaa, 0xe5, 0x44, 0xf1, 0x65, 0xe2, 0x84, 0xf1, 0x1d, 0xe2, 0xb0, 0x7b, 0xe6, 0xb4, 0x0f, 0x4b, 0x81, 0xab, 0x6b, 0x65, 0x84, - 0xbb, 0x9a, 0x26, 0x86, 0xbb, 0xe9, 0xa3, 0x6d, 0x40, 0xec, 0x4e, 0x3b, 0x74, 0xfc, 0x88, 0x7f, - 0x8b, 0x27, 0x6c, 0xf4, 0xfd, 0xb5, 0x3a, 0x2b, 0x5a, 0x45, 0x57, 0xbb, 0xa8, 0xe1, 0x8c, 0x16, - 0x34, 0xaf, 0x85, 0x81, 0xa2, 0x5e, 0x0b, 0x83, 0x39, 0x1e, 0xfe, 0xbf, 0x62, 0xc1, 0x09, 0x3a, - 0x2d, 0xa6, 0x37, 0x78, 0x84, 0x02, 0x98, 0xa0, 0xcb, 0xae, 0x45, 0x62, 0x59, 0x26, 0xf6, 0x57, - 0x8e, 0x88, 0x6f, 0xd2, 0x49, 0xe4, 0xc8, 0x2b, 0x26, 0x31, 0x9c, 0xa6, 0x6e, 0x7f, 0xcd, 0x02, - 0xe6, 0x3d, 0x79, 0x0c, 0x87, 0xd9, 0x25, 0xf3, 0x30, 0xb3, 
0xf3, 0x39, 0x46, 0x8f, 0x73, 0xec, - 0x79, 0x98, 0xa4, 0xd0, 0x7a, 0x18, 0xdc, 0xdf, 0x91, 0x12, 0x7f, 0xbe, 0x74, 0xf5, 0x2b, 0x25, - 0xbe, 0x6d, 0x54, 0xf4, 0x29, 0xfa, 0xbc, 0x05, 0xc3, 0x4d, 0xa7, 0xed, 0x34, 0x79, 0xf6, 0xae, - 0x02, 0x66, 0x22, 0xa3, 0xfe, 0xfc, 0x92, 0xa8, 0xcb, 0x4d, 0x1c, 0x1f, 0x95, 0x9f, 0x2e, 0x8b, - 0x73, 0xcd, 0x1a, 0xaa, 0xf1, 0xd9, 0xbb, 0x30, 0x66, 0x10, 0x3b, 0x52, 0x7d, 0xf8, 0xf3, 0x16, - 0x67, 0xfa, 0x4a, 0x67, 0xb9, 0x07, 0x53, 0xbe, 0xf6, 0x9f, 0xb2, 0x33, 0x29, 0x50, 0xcf, 0x17, - 0x67, 0xeb, 0x8c, 0x0b, 0x6a, 0x9e, 0xa2, 0x29, 0x82, 0xb8, 0xbb, 0x0d, 0xfb, 0x37, 0x2c, 0x78, - 0x48, 0x47, 0xd4, 0xc2, 0x85, 0xf3, 0x0c, 0xd8, 0x35, 0x18, 0x0e, 0xda, 0x24, 0x74, 0x12, 0xfd, - 0xec, 0xac, 0x1c, 0xff, 0x1b, 0xa2, 0x7c, 0x7f, 0x77, 0x6e, 0x5a, 0xa7, 0x2e, 0xcb, 0xb1, 0xaa, - 0x89, 0x6c, 0x18, 0x64, 0xe3, 0x12, 0x89, 0x40, 0x6f, 0x96, 0xcd, 0x8a, 0x5d, 0x90, 0x45, 0x58, - 0x40, 0xec, 0xbf, 0x69, 0xf1, 0xe5, 0xa6, 0x77, 0x1d, 0xfd, 0x02, 0x4c, 0x6e, 0x51, 0x55, 0x6e, - 0xf9, 0x7e, 0x3b, 0xe4, 0xe6, 0x77, 0x39, 0x62, 0x2f, 0x14, 0x1f, 0x31, 0xed, 0x73, 0x17, 0x67, - 0x44, 0xef, 0x27, 0xaf, 0xa5, 0xc8, 0xe2, 0xae, 0x86, 0xec, 0x7f, 0x50, 0xe2, 0x7b, 0x96, 0xc9, - 0x70, 0x4f, 0xc1, 0x50, 0x3b, 0x70, 0x97, 0x56, 0x6b, 0x58, 0x8c, 0x95, 0x62, 0x3a, 0x75, 0x5e, - 0x8c, 0x25, 0x1c, 0x5d, 0x04, 0x20, 0xf7, 0x63, 0x12, 0xfa, 0x4e, 0x4b, 0x5d, 0xe9, 0x2b, 0x51, - 0x69, 0x59, 0x41, 0xb0, 0x86, 0x45, 0xeb, 0xb4, 0xc3, 0x60, 0xdb, 0x73, 0x59, 0x9c, 0x4b, 0xd9, - 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0x51, 0x05, 0xba, 0xe3, 0x47, 0xfc, 0x18, 0x73, 0xee, 0x88, - 0x4c, 0x4a, 0xc3, 0x89, 0x02, 0x7d, 0x53, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x81, 0xc1, 0xd8, 0x61, - 0x17, 0xd5, 0x03, 0x45, 0xbc, 0x7e, 0xd6, 0x28, 0xae, 0x9e, 0xba, 0x8a, 0x56, 0xc5, 0x82, 0x84, - 0xfd, 0x9f, 0xaa, 0x00, 0x89, 0xd4, 0x85, 0x3e, 0xd7, 0xbd, 0xe1, 0x3f, 0x56, 0x54, 0x64, 0x7b, - 0x70, 0xbb, 0x1d, 0x7d, 0xc9, 0x82, 0x11, 0xa7, 0xd5, 0x0a, 0x9a, 0x4e, 0xcc, 0x86, 0xa7, 0x54, - 
0x94, 0xf5, 0x88, 0x9e, 0x2c, 0x24, 0x75, 0x79, 0x67, 0x9e, 0x93, 0x97, 0xc7, 0x1a, 0x24, 0xb7, - 0x3f, 0x7a, 0x17, 0xd0, 0x47, 0xa5, 0xd4, 0xce, 0x67, 0x78, 0x36, 0x2d, 0xb5, 0x57, 0x19, 0xc3, - 0xd5, 0x04, 0x76, 0xf4, 0x96, 0x91, 0x79, 0xa8, 0x52, 0x24, 0x58, 0xd9, 0x90, 0x43, 0xf2, 0x92, - 0x0e, 0xa1, 0xd7, 0x75, 0xf7, 0xf8, 0x81, 0x22, 0xd9, 0x00, 0x34, 0x71, 0x38, 0xc7, 0x35, 0x3e, - 0x86, 0x09, 0xd7, 0x3c, 0x79, 0x85, 0x8b, 0xdf, 0x85, 0xfc, 0x16, 0x52, 0x47, 0x76, 0x72, 0xd6, - 0xa6, 0x00, 0x38, 0xdd, 0x04, 0x7a, 0x9d, 0x07, 0x2f, 0xac, 0xfa, 0xeb, 0x81, 0x70, 0xf3, 0x3b, - 0x57, 0x60, 0xce, 0x77, 0xa2, 0x98, 0x6c, 0xd1, 0x3a, 0xc9, 0xe1, 0x7a, 0x5d, 0x50, 0xc1, 0x8a, - 0x1e, 0x5a, 0x83, 0x41, 0x16, 0x9b, 0x16, 0xcd, 0x0c, 0x17, 0x31, 0x09, 0x9a, 0x21, 0xd9, 0xc9, - 0xfe, 0x61, 0x7f, 0x23, 0x2c, 0x68, 0xa1, 0xcb, 0x32, 0x29, 0x43, 0xb4, 0xea, 0xdf, 0x8c, 0x08, - 0x4b, 0xca, 0x50, 0x5d, 0xfc, 0x48, 0x92, 0x65, 0x81, 0x97, 0x67, 0xa6, 0x6b, 0x34, 0x6a, 0x52, - 0xc1, 0x46, 0xfc, 0x97, 0x59, 0x20, 0x67, 0xa0, 0x48, 0x47, 0xcd, 0x9c, 0x91, 0xc9, 0x60, 0xdf, - 0x32, 0x89, 0xe1, 0x34, 0xf5, 0x63, 0x3d, 0x52, 0x67, 0x7d, 0x98, 0x4c, 0x6f, 0xca, 0x23, 0x3d, - 0xc2, 0x7f, 0x5c, 0x81, 0x71, 0x73, 0x71, 0xa0, 0xf3, 0x50, 0x15, 0x44, 0x54, 0x8a, 0x37, 0xb5, - 0x07, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x58, 0xb2, 0x3b, 0x56, 0x5d, 0x73, 0xf0, 0x4a, 0x92, 0xdd, - 0x29, 0x08, 0xd6, 0xb0, 0xa8, 0x24, 0x7c, 0x27, 0x08, 0x62, 0x75, 0x12, 0xa8, 0x75, 0xb3, 0xc8, - 0x4a, 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x2e, 0x9d, 0xcc, 0x96, 0x69, 0xde, 0x54, 0x27, 0xc0, 0x15, - 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x05, 0x11, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x1c, 0xe6, 0x1a, - 0x3c, 0x5e, 0x53, 0xc2, 0xd1, 0xa7, 0xe1, 0x21, 0x15, 0x5e, 0x89, 0xb9, 0xb9, 0x58, 0xb6, 0x38, - 0x68, 0xa8, 0xcc, 0x0f, 0x2d, 0x65, 0xa3, 0xe1, 0x5e, 0xf5, 0xd1, 0x2b, 0x30, 0x2e, 0x64, 0x65, - 0x49, 0x71, 0xc8, 0xf4, 0x7b, 0xb8, 0x62, 0x40, 0x71, 0x0a, 0x1b, 0xd5, 0x60, 0x92, 0x96, 0x30, - 0x21, 0x55, 0x52, 0xe0, 0x61, 0xa2, 
0xea, 0xa8, 0xbf, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x68, 0x01, - 0x26, 0xb8, 0xb0, 0x42, 0x15, 0x43, 0x36, 0x0f, 0xc2, 0x37, 0x57, 0x6d, 0x84, 0x1b, 0x26, 0x18, - 0xa7, 0xf1, 0xd1, 0x4b, 0x30, 0xea, 0x84, 0xcd, 0x4d, 0x2f, 0x26, 0xcd, 0xb8, 0x13, 0xf2, 0x94, - 0x27, 0x9a, 0xe3, 0xc8, 0x82, 0x06, 0xc3, 0x06, 0xa6, 0xfd, 0x1e, 0x9c, 0xc8, 0x08, 0x04, 0xa0, - 0x0b, 0xc7, 0x69, 0x7b, 0xf2, 0x9b, 0x52, 0xae, 0x6f, 0x0b, 0xf5, 0x55, 0xf9, 0x35, 0x1a, 0x16, - 0x5d, 0x9d, 0xcc, 0x4e, 0xae, 0x25, 0x6d, 0x55, 0xab, 0x73, 0x45, 0x02, 0x70, 0x82, 0x63, 0xff, - 0x29, 0x80, 0x66, 0xbd, 0x29, 0xe0, 0xee, 0xf4, 0x12, 0x8c, 0xca, 0x3c, 0xc4, 0x5a, 0x32, 0x4f, - 0xf5, 0x99, 0x97, 0x34, 0x18, 0x36, 0x30, 0x69, 0xdf, 0x7c, 0x69, 0x93, 0x4a, 0x3b, 0xda, 0x29, - 0x63, 0x15, 0x4e, 0x70, 0xd0, 0x39, 0x18, 0x8e, 0x48, 0x6b, 0xfd, 0xaa, 0xe7, 0xdf, 0x15, 0x0b, - 0x5b, 0x71, 0xe6, 0x86, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x42, 0xb9, 0xe3, 0xb9, 0x62, 0x29, 0x4b, - 0xb1, 0xa1, 0x7c, 0x73, 0xb5, 0xb6, 0xbf, 0x3b, 0xf7, 0x58, 0xaf, 0xf4, 0xca, 0x54, 0x3f, 0x8f, - 0xe6, 0xe9, 0xf6, 0xa3, 0x95, 0xb3, 0x2e, 0x0c, 0x06, 0xfb, 0xbc, 0x30, 0xb8, 0x08, 0x20, 0xbe, - 0x5a, 0xae, 0xe5, 0x72, 0x32, 0x6b, 0x97, 0x14, 0x04, 0x6b, 0x58, 0x54, 0xcb, 0x6f, 0x86, 0xc4, - 0x91, 0x8a, 0x30, 0x77, 0x50, 0x1f, 0x3e, 0xbc, 0x96, 0xbf, 0x94, 0x26, 0x86, 0xbb, 0xe9, 0xa3, - 0x00, 0xa6, 0x5c, 0x11, 0xc3, 0x9b, 0x34, 0x5a, 0xed, 0xdf, 0x2b, 0x9e, 0xf9, 0xf6, 0xa4, 0x09, - 0xe1, 0x6e, 0xda, 0xe8, 0x4d, 0x98, 0x95, 0x85, 0xdd, 0x01, 0xd4, 0x6c, 0xbb, 0x94, 0x17, 0x4f, - 0xef, 0xed, 0xce, 0xcd, 0xd6, 0x7a, 0x62, 0xe1, 0x03, 0x28, 0xa0, 0x37, 0x60, 0x90, 0x5d, 0x30, - 0x45, 0x33, 0x23, 0xec, 0xc4, 0x7b, 0xbe, 0x48, 0x6c, 0x05, 0x5d, 0xf5, 0xf3, 0xec, 0x9a, 0x4a, - 0x78, 0x0d, 0x27, 0xb7, 0x76, 0xac, 0x10, 0x0b, 0x9a, 0xa8, 0x0d, 0x23, 0x8e, 0xef, 0x07, 0xb1, - 0xc3, 0x05, 0xb1, 0xd1, 0x22, 0xb2, 0xa4, 0xd6, 0xc4, 0x42, 0x52, 0x97, 0xb7, 0xa3, 0x1c, 0x11, - 0x35, 0x08, 0xd6, 0x9b, 0x40, 0xf7, 0x60, 0x22, 0xb8, 0x47, 0x19, 0xa6, 
0xbc, 0x11, 0x89, 0x66, - 0xc6, 0xcc, 0x0f, 0xcb, 0x31, 0xd4, 0x1a, 0x95, 0x35, 0x4e, 0x66, 0x12, 0xc5, 0xe9, 0x56, 0xd0, - 0xbc, 0x61, 0xae, 0x1e, 0x4f, 0x7c, 0xe3, 0x13, 0x73, 0xb5, 0x6e, 0x9d, 0x66, 0x41, 0xfa, 0xdc, - 0x1f, 0x96, 0x71, 0x84, 0x89, 0x54, 0x90, 0x7e, 0x02, 0xc2, 0x3a, 0x1e, 0xda, 0x84, 0xd1, 0xe4, - 0x6e, 0x2b, 0x8c, 0x58, 0xfe, 0x1f, 0xcd, 0xdd, 0xeb, 0xe0, 0x8f, 0x5b, 0xd5, 0x6a, 0xf2, 0x48, - 0x1f, 0xbd, 0x04, 0x1b, 0x94, 0x67, 0x3f, 0x0e, 0x23, 0xda, 0x14, 0xf7, 0xe3, 0xee, 0x3d, 0xfb, - 0x0a, 0x4c, 0xa6, 0xa7, 0xae, 0x2f, 0x77, 0xf1, 0xff, 0x51, 0x82, 0x89, 0x8c, 0x8b, 0x2d, 0x96, - 0x8d, 0x39, 0xc5, 0x64, 0x93, 0xe4, 0xcb, 0x26, 0xab, 0x2c, 0x15, 0x60, 0x95, 0x92, 0x6f, 0x97, - 0x7b, 0xf2, 0x6d, 0xc1, 0x1e, 0x2b, 0xef, 0x87, 0x3d, 0x9a, 0x27, 0xd2, 0x40, 0xa1, 0x13, 0xe9, - 0x01, 0xb0, 0x54, 0xe3, 0x50, 0x1b, 0x2a, 0x70, 0xa8, 0x7d, 0xb5, 0x04, 0x93, 0x89, 0x6b, 0xbc, - 0x48, 0x83, 0x7e, 0xf4, 0x17, 0x1e, 0x6b, 0xc6, 0x85, 0x47, 0x5e, 0x96, 0xf3, 0x54, 0xff, 0x7a, - 0x5e, 0x7e, 0xbc, 0x91, 0xba, 0xfc, 0x78, 0xbe, 0x4f, 0xba, 0x07, 0x5f, 0x84, 0x7c, 0xab, 0x04, - 0x27, 0xd3, 0x55, 0x96, 0x5a, 0x8e, 0xb7, 0x75, 0x0c, 0xe3, 0xf5, 0x69, 0x63, 0xbc, 0x5e, 0xec, - 0xef, 0xbb, 0x58, 0x27, 0x7b, 0x0e, 0x9a, 0x93, 0x1a, 0xb4, 0x8f, 0x1f, 0x86, 0xf8, 0xc1, 0x23, - 0xf7, 0x47, 0x16, 0x3c, 0x9c, 0x59, 0xef, 0x18, 0x4c, 0xbc, 0xaf, 0x99, 0x26, 0xde, 0xe7, 0x0e, + 0xbb, 0x9a, 0x66, 0x86, 0xbb, 0xf9, 0xa3, 0x6d, 0x40, 0xec, 0x4e, 0x3b, 0x74, 0xfc, 0x88, 0x7f, + 0x8b, 0x27, 0x6c, 0xf4, 0xfd, 0xd5, 0x3a, 0x2b, 0x6a, 0x45, 0x57, 0xbb, 0xb8, 0xe1, 0x8c, 0x1a, + 0x34, 0xaf, 0x85, 0x81, 0xa2, 0x5e, 0x0b, 0x83, 0x39, 0xde, 0xfd, 0xbf, 0x62, 0xc1, 0x09, 0x3a, + 0x2c, 0xa6, 0x27, 0x78, 0x84, 0x02, 0x98, 0xa0, 0xd3, 0xae, 0x45, 0x62, 0x09, 0x13, 0xeb, 0x2b, + 0x47, 0xc5, 0x37, 0xf9, 0x24, 0x7a, 0xe4, 0x15, 0x93, 0x19, 0x4e, 0x73, 0xb7, 0xbf, 0x66, 0x01, + 0xf3, 0x9e, 0x3c, 0x86, 0xcd, 0xec, 0x92, 0xb9, 0x99, 0xd9, 0xf9, 0x12, 0xa3, 0xc7, 0x3e, 0xf6, + 0x3c, 0x4c, 
0x52, 0x6c, 0x3d, 0x0c, 0xee, 0xef, 0x48, 0x8d, 0x3f, 0x5f, 0xbb, 0xfa, 0x95, 0x12, + 0x5f, 0x36, 0x2a, 0xf2, 0x14, 0x7d, 0xde, 0x82, 0xe1, 0xa6, 0xd3, 0x76, 0x9a, 0x3c, 0x73, 0x57, + 0x01, 0x33, 0x91, 0x51, 0x7e, 0x7e, 0x49, 0x94, 0xe5, 0x26, 0x8e, 0x8f, 0xca, 0x4f, 0x97, 0xe0, + 0x5c, 0xb3, 0x86, 0xaa, 0x7c, 0xf6, 0x2e, 0x8c, 0x19, 0xcc, 0x8e, 0xf4, 0x3c, 0xfc, 0x79, 0x8b, + 0x0b, 0x7d, 0x75, 0x66, 0xb9, 0x07, 0x53, 0xbe, 0xf6, 0x9f, 0x8a, 0x33, 0xa9, 0x50, 0xcf, 0x17, + 0x17, 0xeb, 0x4c, 0x0a, 0x6a, 0x9e, 0xa2, 0x29, 0x86, 0xb8, 0xbb, 0x0e, 0xfb, 0x37, 0x2c, 0x78, + 0x48, 0x27, 0xd4, 0x42, 0x85, 0xf3, 0x0c, 0xd8, 0x35, 0x18, 0x0e, 0xda, 0x24, 0x74, 0x92, 0xf3, + 0xd9, 0x59, 0xd9, 0xff, 0x37, 0x04, 0x7c, 0x7f, 0x77, 0x6e, 0x5a, 0xe7, 0x2e, 0xe1, 0x58, 0x95, + 0x44, 0x36, 0x0c, 0xb2, 0x7e, 0x89, 0x44, 0x90, 0x37, 0xcb, 0x64, 0xc5, 0x2e, 0xc8, 0x22, 0x2c, + 0x30, 0xf6, 0xdf, 0xb4, 0xf8, 0x74, 0xd3, 0x9b, 0x8e, 0x7e, 0x01, 0x26, 0xb7, 0xe8, 0x51, 0x6e, + 0xf9, 0x7e, 0x3b, 0xe4, 0xe6, 0x77, 0xd9, 0x63, 0x2f, 0x14, 0xef, 0x31, 0xed, 0x73, 0x17, 0x67, + 0x44, 0xeb, 0x27, 0xaf, 0xa5, 0xd8, 0xe2, 0xae, 0x8a, 0xec, 0x7f, 0x50, 0xe2, 0x6b, 0x96, 0xe9, + 0x70, 0x4f, 0xc1, 0x50, 0x3b, 0x70, 0x97, 0x56, 0x6b, 0x58, 0xf4, 0x95, 0x12, 0x3a, 0x75, 0x0e, + 0xc6, 0x12, 0x8f, 0x2e, 0x02, 0x90, 0xfb, 0x31, 0x09, 0x7d, 0xa7, 0xa5, 0xae, 0xf4, 0x95, 0xaa, + 0xb4, 0xac, 0x30, 0x58, 0xa3, 0xa2, 0x65, 0xda, 0x61, 0xb0, 0xed, 0xb9, 0x2c, 0xc6, 0xa5, 0x6c, + 0x96, 0xa9, 0x2b, 0x0c, 0xd6, 0xa8, 0xe8, 0x01, 0xba, 0xe3, 0x47, 0x7c, 0x1b, 0x73, 0xee, 0x88, + 0x2c, 0x4a, 0xc3, 0xc9, 0x01, 0xfa, 0xa6, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x02, 0x83, 0xb1, 0xc3, + 0x2e, 0xaa, 0x07, 0x8a, 0x78, 0xfd, 0xac, 0x51, 0x5a, 0x3d, 0x6d, 0x15, 0x2d, 0x8a, 0x05, 0x0b, + 0xfb, 0x3f, 0x55, 0x01, 0x12, 0xad, 0x0b, 0x7d, 0xae, 0x7b, 0xc1, 0x7f, 0xac, 0xa8, 0xca, 0xf6, + 0xe0, 0x56, 0x3b, 0xfa, 0x92, 0x05, 0x23, 0x4e, 0xab, 0x15, 0x34, 0x9d, 0x98, 0x75, 0x4f, 0xa9, + 0xa8, 0xe8, 0x11, 0x2d, 0x59, 0x48, 0xca, 0xf2, 
0xc6, 0x3c, 0x27, 0x2f, 0x8f, 0x35, 0x4c, 0x6e, + 0x7b, 0xf4, 0x26, 0xa0, 0x8f, 0x4a, 0xad, 0x9d, 0x8f, 0xf0, 0x6c, 0x5a, 0x6b, 0xaf, 0x32, 0x81, + 0xab, 0x29, 0xec, 0xe8, 0x2d, 0x23, 0xeb, 0x50, 0xa5, 0x48, 0xa0, 0xb2, 0xa1, 0x87, 0xe4, 0x25, + 0x1c, 0x42, 0xaf, 0xeb, 0xee, 0xf1, 0x03, 0x45, 0x32, 0x01, 0x68, 0xea, 0x70, 0x8e, 0x6b, 0x7c, + 0x0c, 0x13, 0xae, 0xb9, 0xf3, 0x0a, 0x17, 0xbf, 0x0b, 0xf9, 0x35, 0xa4, 0xb6, 0xec, 0x64, 0xaf, + 0x4d, 0x21, 0x70, 0xba, 0x0a, 0xf4, 0x3a, 0x0f, 0x5e, 0x58, 0xf5, 0xd7, 0x03, 0xe1, 0xe6, 0x77, + 0xae, 0xc0, 0x98, 0xef, 0x44, 0x31, 0xd9, 0xa2, 0x65, 0x92, 0xcd, 0xf5, 0xba, 0xe0, 0x82, 0x15, + 0x3f, 0xb4, 0x06, 0x83, 0x2c, 0x2e, 0x2d, 0x9a, 0x19, 0x2e, 0x62, 0x12, 0x34, 0xc3, 0xb1, 0x93, + 0xf5, 0xc3, 0xfe, 0x46, 0x58, 0xf0, 0x42, 0x97, 0x65, 0x42, 0x86, 0x68, 0xd5, 0xbf, 0x19, 0x11, + 0x96, 0x90, 0xa1, 0xba, 0xf8, 0x91, 0x24, 0xc3, 0x02, 0x87, 0x67, 0xa6, 0x6a, 0x34, 0x4a, 0x52, + 0xc5, 0x46, 0xfc, 0x97, 0x19, 0x20, 0x67, 0xa0, 0x48, 0x43, 0xcd, 0x7c, 0x91, 0x49, 0x67, 0xdf, + 0x32, 0x99, 0xe1, 0x34, 0xf7, 0x63, 0xdd, 0x52, 0x67, 0x7d, 0x98, 0x4c, 0x2f, 0xca, 0x23, 0xdd, + 0xc2, 0x7f, 0x5c, 0x81, 0x71, 0x73, 0x72, 0xa0, 0xf3, 0x50, 0x15, 0x4c, 0x54, 0x7a, 0x37, 0xb5, + 0x06, 0xae, 0x49, 0x04, 0x4e, 0x68, 0x58, 0xa2, 0x3b, 0x56, 0x5c, 0x73, 0xf0, 0x4a, 0x12, 0xdd, + 0x29, 0x0c, 0xd6, 0xa8, 0xa8, 0x26, 0x7c, 0x27, 0x08, 0x62, 0xb5, 0x13, 0xa8, 0x79, 0xb3, 0xc8, + 0xa0, 0x58, 0x60, 0xe9, 0x0e, 0x70, 0x97, 0x0e, 0x66, 0xcb, 0x34, 0x6f, 0xaa, 0x1d, 0xe0, 0x8a, + 0x8e, 0xc4, 0x26, 0x2d, 0xdd, 0xd1, 0x82, 0x88, 0x4d, 0x44, 0xa1, 0x6f, 0x27, 0x0e, 0x73, 0x0d, + 0x1e, 0xab, 0x29, 0xf1, 0xe8, 0xd3, 0xf0, 0x90, 0x0a, 0xad, 0xc4, 0xdc, 0x5c, 0x2c, 0x6b, 0x1c, + 0x34, 0x8e, 0xcc, 0x0f, 0x2d, 0x65, 0x93, 0xe1, 0x5e, 0xe5, 0xd1, 0x2b, 0x30, 0x2e, 0x74, 0x65, + 0xc9, 0x71, 0xc8, 0xf4, 0x7b, 0xb8, 0x62, 0x60, 0x71, 0x8a, 0x1a, 0xd5, 0x60, 0x92, 0x42, 0x98, + 0x92, 0x2a, 0x39, 0xf0, 0x10, 0x51, 0xb5, 0xd5, 0x5f, 0x49, 0xe1, 0x71, 0x57, 0x09, 
0xb4, 0x00, + 0x13, 0x5c, 0x59, 0xa1, 0x07, 0x43, 0x36, 0x0e, 0xc2, 0x37, 0x57, 0x2d, 0x84, 0x1b, 0x26, 0x1a, + 0xa7, 0xe9, 0xd1, 0x4b, 0x30, 0xea, 0x84, 0xcd, 0x4d, 0x2f, 0x26, 0xcd, 0xb8, 0x13, 0xf2, 0x74, + 0x27, 0x9a, 0xe3, 0xc8, 0x82, 0x86, 0xc3, 0x06, 0xa5, 0xfd, 0x1e, 0x9c, 0xc8, 0x08, 0x04, 0xa0, + 0x13, 0xc7, 0x69, 0x7b, 0xf2, 0x9b, 0x52, 0xae, 0x6f, 0x0b, 0xf5, 0x55, 0xf9, 0x35, 0x1a, 0x15, + 0x9d, 0x9d, 0xcc, 0x4e, 0xae, 0x25, 0x6c, 0x55, 0xb3, 0x73, 0x45, 0x22, 0x70, 0x42, 0x63, 0xff, + 0x29, 0x80, 0x66, 0xbd, 0x29, 0xe0, 0xee, 0xf4, 0x12, 0x8c, 0xca, 0x1c, 0xc4, 0x5a, 0x22, 0x4f, + 0xf5, 0x99, 0x97, 0x34, 0x1c, 0x36, 0x28, 0x69, 0xdb, 0x7c, 0x69, 0x93, 0x4a, 0x3b, 0xda, 0x29, + 0x63, 0x15, 0x4e, 0x68, 0xd0, 0x39, 0x18, 0x8e, 0x48, 0x6b, 0xfd, 0xaa, 0xe7, 0xdf, 0x15, 0x13, + 0x5b, 0x49, 0xe6, 0x86, 0x80, 0x63, 0x45, 0x81, 0x16, 0xa1, 0xdc, 0xf1, 0x5c, 0x31, 0x95, 0xa5, + 0xda, 0x50, 0xbe, 0xb9, 0x5a, 0xdb, 0xdf, 0x9d, 0x7b, 0xac, 0x57, 0x6a, 0x65, 0x7a, 0x3e, 0x8f, + 0xe6, 0xe9, 0xf2, 0xa3, 0x85, 0xb3, 0x2e, 0x0c, 0x06, 0xfb, 0xbc, 0x30, 0xb8, 0x08, 0x20, 0xbe, + 0x5a, 0xce, 0xe5, 0x72, 0x32, 0x6a, 0x97, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x94, 0xdf, 0x0c, 0x89, + 0x23, 0x0f, 0xc2, 0xdc, 0x41, 0x7d, 0xf8, 0xf0, 0xa7, 0xfc, 0xa5, 0x34, 0x33, 0xdc, 0xcd, 0x1f, + 0x05, 0x30, 0xe5, 0x8a, 0xf8, 0xdd, 0xa4, 0xd2, 0x6a, 0xff, 0x5e, 0xf1, 0xcc, 0xb7, 0x27, 0xcd, + 0x08, 0x77, 0xf3, 0x46, 0x6f, 0xc2, 0xac, 0x04, 0x76, 0x07, 0x4f, 0xb3, 0xe5, 0x52, 0x5e, 0x3c, + 0xbd, 0xb7, 0x3b, 0x37, 0x5b, 0xeb, 0x49, 0x85, 0x0f, 0xe0, 0x80, 0xde, 0x80, 0x41, 0x76, 0xc1, + 0x14, 0xcd, 0x8c, 0xb0, 0x1d, 0xef, 0xf9, 0x22, 0xb1, 0x15, 0x74, 0xd6, 0xcf, 0xb3, 0x6b, 0x2a, + 0xe1, 0x35, 0x9c, 0xdc, 0xda, 0x31, 0x20, 0x16, 0x3c, 0x51, 0x1b, 0x46, 0x1c, 0xdf, 0x0f, 0x62, + 0x87, 0x2b, 0x62, 0xa3, 0x45, 0x74, 0x49, 0xad, 0x8a, 0x85, 0xa4, 0x2c, 0xaf, 0x47, 0x39, 0x22, + 0x6a, 0x18, 0xac, 0x57, 0x81, 0xee, 0xc1, 0x44, 0x70, 0x8f, 0x0a, 0x4c, 0x79, 0x23, 0x12, 0xcd, + 0x8c, 0x99, 0x1f, 0x96, 
0x63, 0xa8, 0x35, 0x0a, 0x6b, 0x92, 0xcc, 0x64, 0x8a, 0xd3, 0xb5, 0xa0, + 0x79, 0xc3, 0x5c, 0x3d, 0x9e, 0xf8, 0xc6, 0x27, 0xe6, 0x6a, 0xdd, 0x3a, 0xcd, 0x02, 0xf4, 0xb9, + 0x3f, 0x2c, 0x93, 0x08, 0x13, 0xa9, 0x00, 0xfd, 0x04, 0x85, 0x75, 0x3a, 0xb4, 0x09, 0xa3, 0xc9, + 0xdd, 0x56, 0x18, 0xb1, 0xdc, 0x3f, 0x9a, 0xbb, 0xd7, 0xc1, 0x1f, 0xb7, 0xaa, 0x95, 0xe4, 0x91, + 0x3e, 0x3a, 0x04, 0x1b, 0x9c, 0x67, 0x3f, 0x0e, 0x23, 0xda, 0x10, 0xf7, 0xe3, 0xee, 0x3d, 0xfb, + 0x0a, 0x4c, 0xa6, 0x87, 0xae, 0x2f, 0x77, 0xf1, 0xff, 0x51, 0x82, 0x89, 0x8c, 0x8b, 0x2d, 0x96, + 0x89, 0x39, 0x25, 0x64, 0x93, 0xc4, 0xcb, 0xa6, 0xa8, 0x2c, 0x15, 0x10, 0x95, 0x52, 0x6e, 0x97, + 0x7b, 0xca, 0x6d, 0x21, 0x1e, 0x2b, 0xef, 0x47, 0x3c, 0x9a, 0x3b, 0xd2, 0x40, 0xa1, 0x1d, 0xe9, + 0x01, 0x88, 0x54, 0x63, 0x53, 0x1b, 0x2a, 0xb0, 0xa9, 0x7d, 0xb5, 0x04, 0x93, 0x89, 0x6b, 0xbc, + 0x48, 0x81, 0x7e, 0xf4, 0x17, 0x1e, 0x6b, 0xc6, 0x85, 0x47, 0x5e, 0x86, 0xf3, 0x54, 0xfb, 0x7a, + 0x5e, 0x7e, 0xbc, 0x91, 0xba, 0xfc, 0x78, 0xbe, 0x4f, 0xbe, 0x07, 0x5f, 0x84, 0x7c, 0xab, 0x04, + 0x27, 0xd3, 0x45, 0x96, 0x5a, 0x8e, 0xb7, 0x75, 0x0c, 0xfd, 0xf5, 0x69, 0xa3, 0xbf, 0x5e, 0xec, + 0xef, 0xbb, 0x58, 0x23, 0x7b, 0x76, 0x9a, 0x93, 0xea, 0xb4, 0x8f, 0x1f, 0x86, 0xf9, 0xc1, 0x3d, + 0xf7, 0x47, 0x16, 0x3c, 0x9c, 0x59, 0xee, 0x18, 0x4c, 0xbc, 0xaf, 0x99, 0x26, 0xde, 0xe7, 0x0e, 0xf1, 0x75, 0x3d, 0x6c, 0xbe, 0xbf, 0x59, 0xee, 0xf1, 0x55, 0xcc, 0x08, 0x76, 0x03, 0x46, 0x9c, - 0x66, 0x93, 0x44, 0xd1, 0xb5, 0xc0, 0x55, 0x89, 0xc5, 0x9e, 0x65, 0xa7, 0x58, 0x52, 0xbc, 0xbf, - 0x3b, 0x37, 0x9b, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x4c, 0x79, 0x58, 0x3a, 0xa2, 0x94, 0x87, - 0x17, 0x01, 0xb6, 0x95, 0xbe, 0x9c, 0xb6, 0xad, 0x69, 0x9a, 0xb4, 0x86, 0x85, 0xfe, 0x0a, 0x93, - 0x3d, 0xb9, 0x5f, 0x4a, 0xc5, 0x8c, 0xb2, 0xcd, 0x99, 0x3f, 0xdd, 0xc7, 0x85, 0x07, 0xf3, 0x2a, - 0x3b, 0xa4, 0x22, 0x89, 0x3e, 0x05, 0x93, 0x11, 0xcf, 0x49, 0xb1, 0xd4, 0x72, 0x22, 0x16, 0x13, - 0x22, 0xf8, 0x29, 0x8b, 0xcb, 0x6d, 0xa4, 0x60, 0xb8, 0x0b, 0xdb, 
0xfe, 0x66, 0x19, 0x3e, 0x7c, - 0xc0, 0xb2, 0x45, 0x0b, 0xe6, 0xfd, 0xf0, 0x33, 0x69, 0x4b, 0xd3, 0x6c, 0x66, 0x65, 0xc3, 0xf4, - 0x94, 0x9a, 0xed, 0xd2, 0xfb, 0x9e, 0xed, 0x2f, 0xeb, 0x76, 0x41, 0xee, 0xaa, 0x7a, 0xe9, 0xd0, - 0x1b, 0xf3, 0x27, 0xf5, 0x5a, 0xe0, 0xb3, 0x16, 0x3c, 0x96, 0xf9, 0x59, 0x86, 0x3f, 0xca, 0x79, - 0xa8, 0x36, 0x69, 0xa1, 0x16, 0xc1, 0x95, 0x84, 0x4e, 0x4a, 0x00, 0x4e, 0x70, 0x0c, 0xb7, 0x93, - 0x52, 0xae, 0xdb, 0xc9, 0x1f, 0x58, 0x30, 0x9d, 0xee, 0xc4, 0x31, 0xf0, 0xad, 0x86, 0xc9, 0xb7, - 0xe6, 0xfb, 0x9b, 0xfc, 0x1e, 0x2c, 0xeb, 0xab, 0x93, 0x70, 0xaa, 0xeb, 0xd4, 0xe3, 0xa3, 0xf8, - 0x4b, 0x16, 0x4c, 0x6d, 0x30, 0x3d, 0x41, 0x0b, 0x93, 0x13, 0xdf, 0x95, 0x13, 0x5b, 0x78, 0x60, - 0x74, 0x1d, 0xd7, 0x7a, 0xba, 0x50, 0x70, 0x77, 0x63, 0xe8, 0x8b, 0x16, 0x4c, 0x3b, 0xf7, 0xa2, - 0xae, 0x47, 0x7a, 0xc4, 0x42, 0x7a, 0x25, 0xc7, 0x2c, 0x97, 0xf3, 0xbc, 0xcf, 0xe2, 0xcc, 0xde, - 0xee, 0xdc, 0x74, 0x16, 0x16, 0xce, 0x6c, 0x95, 0xce, 0xef, 0xa6, 0x08, 0x97, 0x29, 0x16, 0xf0, - 0x99, 0x15, 0x5c, 0xc3, 0xd9, 0x9a, 0x84, 0x60, 0x45, 0x11, 0xbd, 0x0d, 0xd5, 0x0d, 0x19, 0x19, - 0x97, 0x66, 0x9b, 0x3d, 0x86, 0x39, 0x2b, 0x90, 0x8e, 0x87, 0x2b, 0x28, 0x10, 0x4e, 0x88, 0xa2, - 0xcb, 0x50, 0xf6, 0xd7, 0x23, 0x11, 0x83, 0x9e, 0xe7, 0x6d, 0x64, 0xfa, 0x78, 0xf1, 0xb0, 0xdd, - 0xeb, 0x2b, 0x0d, 0x4c, 0x49, 0x50, 0x4a, 0xe1, 0x1d, 0x57, 0xd8, 0xa3, 0x73, 0x28, 0xe1, 0xc5, - 0x5a, 0x37, 0x25, 0xbc, 0x58, 0xc3, 0x94, 0x04, 0xaa, 0xc3, 0x00, 0x0b, 0xc6, 0x11, 0xc6, 0xe6, - 0x9c, 0x44, 0x05, 0x5d, 0x21, 0x47, 0x3c, 0x33, 0x27, 0x2b, 0xc6, 0x9c, 0x10, 0x5a, 0x83, 0xc1, - 0x26, 0x7b, 0x5c, 0x42, 0x58, 0x01, 0xf2, 0x52, 0x78, 0x74, 0x3d, 0x44, 0xc1, 0x6f, 0xd8, 0x78, - 0x39, 0x16, 0xb4, 0x18, 0x55, 0xd2, 0xde, 0x5c, 0x8f, 0x84, 0x9a, 0x9f, 0x47, 0xb5, 0xeb, 0x99, - 0x10, 0x41, 0x95, 0x95, 0x63, 0x41, 0x0b, 0xd5, 0xa0, 0xb4, 0xde, 0x14, 0xb1, 0x3a, 0x39, 0x46, + 0x66, 0x93, 0x44, 0xd1, 0xb5, 0xc0, 0x55, 0x49, 0xc5, 0x9e, 0x65, 0xbb, 0x58, 0x02, 0xde, 0xdf, + 0x9d, 
0x9b, 0x4d, 0xb3, 0x48, 0xd0, 0x58, 0xe7, 0x60, 0xa6, 0x3b, 0x2c, 0x1d, 0x51, 0xba, 0xc3, + 0x8b, 0x00, 0xdb, 0xea, 0xbc, 0x9c, 0xb6, 0xad, 0x69, 0x27, 0x69, 0x8d, 0x0a, 0xfd, 0x15, 0xa6, + 0x7b, 0x72, 0xbf, 0x94, 0x8a, 0x19, 0x65, 0x9b, 0x33, 0x7e, 0xba, 0x8f, 0x0b, 0x0f, 0xe6, 0x55, + 0x76, 0x48, 0xc5, 0x12, 0x7d, 0x0a, 0x26, 0x23, 0x9e, 0x8f, 0x62, 0xa9, 0xe5, 0x44, 0x2c, 0x26, + 0x44, 0xc8, 0x53, 0x16, 0x97, 0xdb, 0x48, 0xe1, 0x70, 0x17, 0xb5, 0xfd, 0xcd, 0x32, 0x7c, 0xf8, + 0x80, 0x69, 0x8b, 0x16, 0xcc, 0xfb, 0xe1, 0x67, 0xd2, 0x96, 0xa6, 0xd9, 0xcc, 0xc2, 0x86, 0xe9, + 0x29, 0x35, 0xda, 0xa5, 0xf7, 0x3d, 0xda, 0x5f, 0xd6, 0xed, 0x82, 0xdc, 0x55, 0xf5, 0xd2, 0xa1, + 0x17, 0xe6, 0x4f, 0xea, 0xb5, 0xc0, 0x67, 0x2d, 0x78, 0x2c, 0xf3, 0xb3, 0x0c, 0x7f, 0x94, 0xf3, + 0x50, 0x6d, 0x52, 0xa0, 0x16, 0xc1, 0x95, 0x84, 0x4e, 0x4a, 0x04, 0x4e, 0x68, 0x0c, 0xb7, 0x93, + 0x52, 0xae, 0xdb, 0xc9, 0x1f, 0x58, 0x30, 0x9d, 0x6e, 0xc4, 0x31, 0xc8, 0xad, 0x86, 0x29, 0xb7, + 0xe6, 0xfb, 0x1b, 0xfc, 0x1e, 0x22, 0xeb, 0xab, 0x93, 0x70, 0xaa, 0x6b, 0xd7, 0xe3, 0xbd, 0xf8, + 0x4b, 0x16, 0x4c, 0x6d, 0xb0, 0x73, 0x82, 0x16, 0x26, 0x27, 0xbe, 0x2b, 0x27, 0xb6, 0xf0, 0xc0, + 0xe8, 0x3a, 0x7e, 0xea, 0xe9, 0x22, 0xc1, 0xdd, 0x95, 0xa1, 0x2f, 0x5a, 0x30, 0xed, 0xdc, 0x8b, + 0xba, 0x1e, 0xe8, 0x11, 0x13, 0xe9, 0x95, 0x1c, 0xb3, 0x5c, 0xce, 0xd3, 0x3e, 0x8b, 0x33, 0x7b, + 0xbb, 0x73, 0xd3, 0x59, 0x54, 0x38, 0xb3, 0x56, 0x3a, 0xbe, 0x9b, 0x22, 0x5c, 0xa6, 0x58, 0xc0, + 0x67, 0x56, 0x70, 0x0d, 0x17, 0x6b, 0x12, 0x83, 0x15, 0x47, 0xf4, 0x36, 0x54, 0x37, 0x64, 0x64, + 0x5c, 0x5a, 0x6c, 0xf6, 0xe8, 0xe6, 0xac, 0x40, 0x3a, 0x1e, 0xae, 0xa0, 0x50, 0x38, 0x61, 0x8a, + 0x2e, 0x43, 0xd9, 0x5f, 0x8f, 0x44, 0x0c, 0x7a, 0x9e, 0xb7, 0x91, 0xe9, 0xe3, 0xc5, 0xc3, 0x76, + 0xaf, 0xaf, 0x34, 0x30, 0x65, 0x41, 0x39, 0x85, 0x77, 0x5c, 0x61, 0x8f, 0xce, 0xe1, 0x84, 0x17, + 0x6b, 0xdd, 0x9c, 0xf0, 0x62, 0x0d, 0x53, 0x16, 0xa8, 0x0e, 0x03, 0x2c, 0x18, 0x47, 0x18, 0x9b, + 0x73, 0x12, 0x15, 0x74, 0x85, 0x1c, 0xf1, 
0xac, 0x9c, 0x0c, 0x8c, 0x39, 0x23, 0xb4, 0x06, 0x83, + 0x4d, 0xf6, 0xb0, 0x84, 0xb0, 0x02, 0xe4, 0xa5, 0xf0, 0xe8, 0x7a, 0x84, 0x82, 0xdf, 0xb0, 0x71, + 0x38, 0x16, 0xbc, 0x18, 0x57, 0xd2, 0xde, 0x5c, 0x8f, 0xc4, 0x31, 0x3f, 0x8f, 0x6b, 0xd7, 0x13, + 0x21, 0x82, 0x2b, 0x83, 0x63, 0xc1, 0x0b, 0xd5, 0xa0, 0xb4, 0xde, 0x14, 0xb1, 0x3a, 0x39, 0x46, 0x66, 0x33, 0x06, 0x7b, 0x71, 0x70, 0x6f, 0x77, 0xae, 0xb4, 0xb2, 0x84, 0x4b, 0xeb, 0x4d, 0xf4, - 0x1a, 0x0c, 0xad, 0xf3, 0xa8, 0x5a, 0x91, 0xcc, 0xf7, 0x42, 0x5e, 0xe8, 0x6f, 0x57, 0x08, 0x2e, - 0x0f, 0x49, 0x11, 0x00, 0x2c, 0xc9, 0xb1, 0x3c, 0x87, 0x2a, 0x4e, 0x58, 0x64, 0xf3, 0x9d, 0xef, - 0x2f, 0xae, 0x58, 0x68, 0xbf, 0xaa, 0x14, 0x6b, 0x14, 0xe9, 0x9a, 0x77, 0xe4, 0x3b, 0x39, 0x2c, - 0x93, 0x6f, 0xee, 0x9a, 0xcf, 0x7c, 0x56, 0x87, 0xaf, 0x79, 0x05, 0xc2, 0x09, 0x51, 0xd4, 0x81, - 0xb1, 0xed, 0xa8, 0xbd, 0x49, 0xe4, 0xd6, 0x67, 0xe9, 0x7d, 0x47, 0x2e, 0x7e, 0x32, 0x27, 0x67, - 0xb3, 0xa8, 0xe2, 0x85, 0x71, 0xc7, 0x69, 0x75, 0x71, 0x30, 0x96, 0x58, 0xee, 0x96, 0x4e, 0x16, - 0x9b, 0xad, 0xd0, 0x29, 0x79, 0xb7, 0x13, 0xdc, 0xd9, 0x89, 0x89, 0x48, 0xff, 0x9b, 0x33, 0x25, - 0xaf, 0x72, 0xe4, 0xee, 0x29, 0x11, 0x00, 0x2c, 0xc9, 0xa9, 0x21, 0x63, 0xdc, 0x78, 0xb2, 0xf0, - 0x90, 0x75, 0x7d, 0x43, 0x32, 0x64, 0x8c, 0xfb, 0x26, 0x44, 0x19, 0xd7, 0x6d, 0x6f, 0x06, 0x71, - 0xe0, 0xa7, 0x78, 0xff, 0x54, 0x11, 0xae, 0x5b, 0xcf, 0xa8, 0xd9, 0xcd, 0x75, 0xb3, 0xb0, 0x70, - 0x66, 0xab, 0xc8, 0x87, 0xf1, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xae, 0x43, 0x54, 0x48, 0x47, - 0x34, 0xea, 0x88, 0xb6, 0x99, 0xe7, 0xb1, 0x09, 0xc1, 0x29, 0xea, 0x74, 0xea, 0xa2, 0xa6, 0xd3, - 0x22, 0xab, 0x37, 0x66, 0x4e, 0x14, 0x99, 0xba, 0x06, 0x47, 0xee, 0x9e, 0x3a, 0x01, 0xc0, 0x92, - 0x1c, 0xe5, 0x75, 0x2c, 0x97, 0x3d, 0xcb, 0x66, 0x9c, 0xcb, 0xeb, 0xba, 0xbc, 0x73, 0x39, 0xaf, - 0x63, 0xc5, 0x98, 0x13, 0x42, 0xef, 0x40, 0x55, 0x08, 0xb7, 0x41, 0x34, 0x73, 0x92, 0x51, 0xfd, - 0xd9, 0x9c, 0xde, 0x72, 0xf4, 0x1b, 0x8d, 0xec, 0x53, 0x5f, 0x44, 0xff, 0x49, 0x24, 
0x9c, 0x90, - 0xb7, 0x7f, 0x63, 0xb0, 0x5b, 0xec, 0x61, 0x8a, 0xcd, 0xdf, 0xe8, 0xbe, 0xb1, 0xfe, 0x54, 0xff, - 0xfa, 0xfb, 0x03, 0xbc, 0xbb, 0xfe, 0xa2, 0x05, 0xa7, 0xda, 0x99, 0x9f, 0x27, 0x04, 0x87, 0x7e, - 0xcd, 0x00, 0x7c, 0x68, 0x54, 0x8e, 0xf1, 0x6c, 0x38, 0xee, 0xd1, 0x66, 0x5a, 0x15, 0x28, 0xbf, - 0x6f, 0x55, 0xe0, 0x36, 0x0c, 0x33, 0xd9, 0x35, 0xc9, 0xef, 0xd3, 0x67, 0x2a, 0x1c, 0x26, 0x82, - 0x2c, 0x09, 0x12, 0x58, 0x11, 0xa3, 0x03, 0xf7, 0x68, 0xfa, 0x23, 0x30, 0x61, 0x60, 0x91, 0xd9, - 0x92, 0xeb, 0x59, 0x2b, 0x62, 0x24, 0x1e, 0xad, 0x1f, 0x84, 0xbc, 0x9f, 0x87, 0x80, 0x0f, 0x6e, - 0x0c, 0xd5, 0x32, 0x14, 0xbd, 0x41, 0xf3, 0x7a, 0x2a, 0x5f, 0xd9, 0x3b, 0x5e, 0x05, 0xe5, 0x1f, - 0x5a, 0x19, 0xf2, 0x34, 0x57, 0x2a, 0x3f, 0x69, 0x2a, 0x95, 0x4f, 0xa6, 0x95, 0xca, 0x2e, 0x53, - 0x92, 0xa1, 0x4f, 0x16, 0xcf, 0xcc, 0x5b, 0x34, 0x81, 0x91, 0xdd, 0x82, 0x33, 0x79, 0xcc, 0x9a, - 0xb9, 0xac, 0xb9, 0xea, 0xb2, 0x36, 0x71, 0x59, 0x73, 0x57, 0x6b, 0x98, 0x41, 0x8a, 0xe6, 0xc0, - 0xb0, 0x7f, 0xb9, 0x04, 0xe5, 0x7a, 0xe0, 0x1e, 0x83, 0x69, 0xec, 0x92, 0x61, 0x1a, 0x7b, 0x22, - 0xf7, 0xa1, 0xc8, 0x9e, 0x86, 0xb0, 0x1b, 0x29, 0x43, 0xd8, 0xcf, 0xe4, 0x93, 0x3a, 0xd8, 0xec, - 0xf5, 0xed, 0x32, 0xe8, 0x4f, 0x5d, 0xa2, 0xff, 0x70, 0x18, 0x4f, 0xe6, 0x72, 0xb1, 0xd7, 0x2f, - 0x45, 0x1b, 0xcc, 0xe3, 0x4d, 0x06, 0x62, 0xfe, 0xc4, 0x3a, 0x34, 0xdf, 0x26, 0xde, 0xc6, 0x66, - 0x4c, 0xdc, 0xf4, 0x87, 0x1d, 0x9f, 0x43, 0xf3, 0x5f, 0x58, 0x30, 0x91, 0x6a, 0x1d, 0xb5, 0xb2, - 0x22, 0xb8, 0x0e, 0x69, 0xec, 0x9a, 0xca, 0x0d, 0xf9, 0x9a, 0x07, 0x50, 0x77, 0x16, 0xd2, 0xa0, - 0xc4, 0x64, 0x6b, 0x75, 0xa9, 0x11, 0x61, 0x0d, 0x03, 0xbd, 0x00, 0x23, 0x71, 0xd0, 0x0e, 0x5a, - 0xc1, 0xc6, 0xce, 0x15, 0x22, 0xb3, 0xb3, 0xa8, 0x9b, 0xa5, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, - 0x3b, 0x65, 0x48, 0x3f, 0x94, 0xfa, 0xff, 0xd7, 0xe9, 0x4f, 0xce, 0x3a, 0xfd, 0x63, 0x0b, 0x26, - 0x69, 0xeb, 0xcc, 0xc5, 0x48, 0x3a, 0x1e, 0xab, 0x67, 0x42, 0xac, 0x03, 0x9e, 0x09, 0x79, 0x92, - 0x72, 0x3b, 0x37, 0xe8, 
0xc4, 0xc2, 0x04, 0xa6, 0x31, 0x31, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, - 0x30, 0x14, 0x11, 0x5a, 0x3a, 0x1e, 0x09, 0x43, 0x2c, 0xa0, 0xf2, 0x15, 0x91, 0x4a, 0x8f, 0x57, - 0x44, 0x58, 0x7e, 0x33, 0xe1, 0xd6, 0x22, 0xc4, 0x0a, 0x2d, 0xbf, 0x99, 0xf4, 0x77, 0x49, 0x70, - 0xec, 0xaf, 0x97, 0x61, 0xb4, 0x1e, 0xb8, 0x49, 0x44, 0xc1, 0xf3, 0x46, 0x44, 0xc1, 0x99, 0x54, - 0x44, 0xc1, 0xa4, 0x8e, 0xfb, 0x60, 0x02, 0x0a, 0x44, 0x1e, 0x3c, 0xf6, 0xce, 0xcd, 0x21, 0x83, - 0x09, 0x8c, 0x3c, 0x78, 0x8a, 0x10, 0x36, 0xe9, 0xfe, 0x34, 0x05, 0x11, 0xfc, 0x6f, 0x0b, 0xc6, - 0xeb, 0x81, 0x4b, 0x17, 0xe8, 0x4f, 0xd3, 0x6a, 0xd4, 0xb3, 0xe7, 0x0d, 0x1e, 0x90, 0x3d, 0xef, - 0x9f, 0x5b, 0x30, 0x54, 0x0f, 0xdc, 0x63, 0x30, 0x0f, 0xaf, 0x98, 0xe6, 0xe1, 0xc7, 0x72, 0x39, - 0x6f, 0x0f, 0x8b, 0xf0, 0x37, 0xcb, 0x30, 0x46, 0x7b, 0x1c, 0x6c, 0xc8, 0xf9, 0x32, 0xc6, 0xc6, - 0x2a, 0x30, 0x36, 0x54, 0x24, 0x0c, 0x5a, 0xad, 0xe0, 0x5e, 0x7a, 0xee, 0x56, 0x58, 0x29, 0x16, - 0x50, 0x74, 0x0e, 0x86, 0xdb, 0x21, 0xd9, 0xf6, 0x82, 0x4e, 0x94, 0x8e, 0xf6, 0xac, 0x8b, 0x72, - 0xac, 0x30, 0xd0, 0xf3, 0x30, 0x1a, 0x79, 0x7e, 0x93, 0x48, 0xa7, 0x97, 0x0a, 0x73, 0x7a, 0xe1, - 0x89, 0x4a, 0xb5, 0x72, 0x6c, 0x60, 0xa1, 0xdb, 0x50, 0x65, 0xff, 0xd9, 0x0e, 0xea, 0xff, 0x19, - 0x10, 0xae, 0x0e, 0x4b, 0x02, 0x38, 0xa1, 0x85, 0x2e, 0x02, 0xc4, 0xd2, 0x3d, 0x27, 0x12, 0x61, - 0xc9, 0x4a, 0x2e, 0x55, 0x8e, 0x3b, 0x11, 0xd6, 0xb0, 0xd0, 0x33, 0x50, 0x8d, 0x1d, 0xaf, 0x75, - 0xd5, 0xf3, 0x49, 0x24, 0xdc, 0x9b, 0x44, 0xd2, 0x71, 0x51, 0x88, 0x13, 0x38, 0x3d, 0xef, 0x59, - 0xd0, 0x3b, 0x7f, 0x62, 0x68, 0x98, 0x61, 0xb3, 0xf3, 0xfe, 0xaa, 0x2a, 0xc5, 0x1a, 0x86, 0xfd, - 0x12, 0x9c, 0xac, 0x07, 0x6e, 0x3d, 0x08, 0xe3, 0x95, 0x20, 0xbc, 0xe7, 0x84, 0xae, 0x9c, 0xbf, - 0x39, 0x99, 0xeb, 0x9a, 0x9e, 0xc9, 0x03, 0xdc, 0x8a, 0x60, 0xe4, 0xae, 0x7e, 0x8e, 0x9d, 0xf8, - 0x7d, 0x86, 0xaa, 0xfc, 0xa0, 0x04, 0xa8, 0xce, 0x1c, 0x88, 0x8c, 0x17, 0xa9, 0x36, 0x61, 0x3c, - 0x22, 0x57, 0x3d, 0xbf, 0x73, 0x5f, 0x90, 0x2a, 0x16, 0x1b, 
0xd4, 0x58, 0xd6, 0xeb, 0x70, 0x3b, - 0x8d, 0x59, 0x86, 0x53, 0x74, 0xe9, 0x60, 0x86, 0x1d, 0x7f, 0x21, 0xba, 0x19, 0x91, 0x50, 0xbc, - 0xc0, 0xc4, 0x06, 0x13, 0xcb, 0x42, 0x9c, 0xc0, 0xe9, 0xe2, 0x61, 0x7f, 0xae, 0x07, 0x3e, 0x0e, - 0x82, 0x58, 0x2e, 0x37, 0xf6, 0x22, 0x87, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x02, 0x28, 0xea, 0xb4, - 0xdb, 0x2d, 0x76, 0x53, 0xea, 0xb4, 0x2e, 0x85, 0x41, 0xa7, 0xcd, 0xfd, 0xc8, 0xc5, 0x63, 0x16, - 0x8d, 0x2e, 0x28, 0xce, 0xa8, 0x41, 0x99, 0xc5, 0x7a, 0xc4, 0x7e, 0x8b, 0x08, 0x78, 0x6e, 0x6d, - 0x6d, 0xb0, 0x22, 0x2c, 0x61, 0xf6, 0x2f, 0xb2, 0x03, 0x8e, 0x3d, 0x8d, 0x13, 0x77, 0x42, 0x82, - 0xb6, 0x60, 0xac, 0xcd, 0x0e, 0xb1, 0x38, 0x0c, 0x5a, 0x2d, 0x22, 0xe5, 0xcb, 0xc3, 0xb9, 0x30, - 0xf1, 0xc7, 0x30, 0x74, 0x72, 0xd8, 0xa4, 0x6e, 0xff, 0xb7, 0x71, 0xc6, 0xab, 0xc4, 0x65, 0xf5, - 0x90, 0x70, 0x56, 0x16, 0x92, 0xdc, 0x47, 0x8a, 0x3c, 0x72, 0x97, 0x9c, 0x03, 0xc2, 0xf5, 0x19, - 0x4b, 0x2a, 0xe8, 0x33, 0xcc, 0x15, 0x9f, 0x33, 0x88, 0xe2, 0x4f, 0x77, 0x72, 0x7c, 0xc3, 0x0d, - 0x5f, 0x90, 0xc0, 0x1a, 0x39, 0x74, 0x15, 0xc6, 0xc4, 0x4b, 0x2a, 0xc2, 0x4c, 0x51, 0x36, 0x54, - 0xec, 0x31, 0xac, 0x03, 0xf7, 0xd3, 0x05, 0xd8, 0xac, 0x8c, 0x36, 0xe0, 0x51, 0xed, 0xa5, 0xb0, - 0x0c, 0x77, 0x3b, 0xce, 0x79, 0x1e, 0xdb, 0xdb, 0x9d, 0x7b, 0x74, 0xed, 0x20, 0x44, 0x7c, 0x30, - 0x1d, 0x74, 0x03, 0x4e, 0x3a, 0xcd, 0xd8, 0xdb, 0x26, 0x35, 0xe2, 0xb8, 0x2d, 0xcf, 0x27, 0x66, - 0x9a, 0x84, 0x87, 0xf7, 0x76, 0xe7, 0x4e, 0x2e, 0x64, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0x93, 0x50, - 0x75, 0xfd, 0x48, 0x8c, 0xc1, 0xa0, 0xf1, 0x30, 0x5e, 0xb5, 0x76, 0xbd, 0xa1, 0xbe, 0x3f, 0xf9, - 0x83, 0x93, 0x0a, 0xe8, 0x5d, 0x18, 0xd5, 0xc3, 0x9f, 0xc4, 0x83, 0x8c, 0x2f, 0x16, 0xd2, 0x9f, - 0x8d, 0x98, 0x21, 0x6e, 0xc1, 0x53, 0x6e, 0xad, 0x46, 0x38, 0x91, 0xd1, 0x04, 0xfa, 0x79, 0x40, - 0x11, 0x09, 0xb7, 0xbd, 0x26, 0x59, 0x68, 0xb2, 0xec, 0xbe, 0xcc, 0xc6, 0x33, 0x6c, 0xc4, 0x77, - 0xa0, 0x46, 0x17, 0x06, 0xce, 0xa8, 0x85, 0x2e, 0x53, 0xce, 0xa3, 0x97, 0x0a, 0x2f, 0x64, 0x29, - 
0x18, 0xce, 0xd4, 0x48, 0x3b, 0x24, 0x4d, 0x27, 0x26, 0xae, 0x49, 0x11, 0xa7, 0xea, 0xd1, 0x73, - 0x49, 0x3d, 0xe0, 0x00, 0xa6, 0xef, 0x6c, 0xf7, 0x23, 0x0e, 0x54, 0xcf, 0xda, 0x0c, 0xa2, 0xf8, - 0x3a, 0x89, 0xef, 0x05, 0xe1, 0x5d, 0x91, 0x11, 0x2d, 0x49, 0x95, 0x98, 0x80, 0xb0, 0x8e, 0x47, - 0x65, 0x28, 0x76, 0xf5, 0xb7, 0x5a, 0x63, 0xf7, 0x2a, 0xc3, 0xc9, 0xde, 0xb9, 0xcc, 0x8b, 0xb1, - 0x84, 0x4b, 0xd4, 0xd5, 0xfa, 0x12, 0xbb, 0x23, 0x49, 0xa1, 0xae, 0xd6, 0x97, 0xb0, 0x84, 0xa3, - 0xa0, 0xfb, 0xf9, 0xc1, 0xf1, 0x22, 0xf7, 0x55, 0xdd, 0x9c, 0xbc, 0xe0, 0x0b, 0x84, 0xf7, 0x61, - 0x52, 0x3d, 0x81, 0xc8, 0x93, 0xc6, 0x45, 0x33, 0x13, 0x6c, 0xe1, 0x1c, 0x26, 0xf7, 0x9c, 0xb2, - 0xeb, 0xad, 0xa6, 0x68, 0xe2, 0xae, 0x56, 0x8c, 0xe4, 0x1c, 0x93, 0xb9, 0x8f, 0x72, 0x9c, 0x87, - 0x6a, 0xd4, 0xb9, 0xe3, 0x06, 0x5b, 0x8e, 0xe7, 0xb3, 0x8b, 0x0c, 0x4d, 0x88, 0x69, 0x48, 0x00, - 0x4e, 0x70, 0x50, 0x1d, 0x86, 0x1d, 0xa1, 0xc2, 0x89, 0x0b, 0x87, 0x9c, 0x28, 0x7c, 0xa9, 0xf0, - 0x71, 0xeb, 0xaa, 0xfc, 0x87, 0x15, 0x15, 0xf4, 0x32, 0x8c, 0x89, 0x20, 0x32, 0xe1, 0xec, 0x79, - 0xc2, 0x0c, 0x38, 0x68, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x0d, 0x18, 0xa7, 0x54, 0x12, 0x06, 0x38, - 0x33, 0xdd, 0x1f, 0x0f, 0xd5, 0xd2, 0x9f, 0xeb, 0x64, 0x70, 0x8a, 0x2c, 0x72, 0xe1, 0x11, 0xa7, - 0x13, 0x07, 0x5b, 0x74, 0x27, 0x98, 0xfb, 0x64, 0x2d, 0xb8, 0x4b, 0x7c, 0x76, 0xcb, 0x30, 0xbc, - 0x78, 0x66, 0x6f, 0x77, 0xee, 0x91, 0x85, 0x03, 0xf0, 0xf0, 0x81, 0x54, 0xd0, 0x5b, 0x30, 0x12, - 0x07, 0x2d, 0xe1, 0xc3, 0x1d, 0xcd, 0x9c, 0x2a, 0x92, 0x84, 0x68, 0x4d, 0x55, 0xd0, 0xcd, 0x18, - 0x8a, 0x08, 0xd6, 0x29, 0xa2, 0x37, 0xf9, 0xae, 0x64, 0x09, 0x33, 0x49, 0x34, 0xf3, 0x50, 0x91, - 0xc1, 0x52, 0x19, 0x36, 0xcd, 0xed, 0x2b, 0x68, 0x60, 0x9d, 0xe0, 0xec, 0xcf, 0xc1, 0x54, 0x17, - 0xcb, 0xeb, 0xcb, 0xb9, 0xf5, 0x3f, 0x0e, 0x40, 0x55, 0x59, 0x0c, 0xd1, 0x79, 0xd3, 0x38, 0xfc, - 0x70, 0xda, 0x38, 0x3c, 0x4c, 0x05, 0x34, 0xdd, 0x1e, 0xfc, 0x66, 0xc6, 0xa3, 0xfa, 0x4f, 0xe7, - 0xee, 0xf1, 0xe2, 0x91, 0x6d, 0x9a, 
0x8a, 0x57, 0x2e, 0x6c, 0x6f, 0xae, 0x1c, 0xa8, 0x35, 0x16, - 0x7c, 0x28, 0x92, 0xea, 0x87, 0xed, 0xc0, 0x5d, 0xad, 0xa7, 0xdf, 0x41, 0xab, 0xd3, 0x42, 0xcc, - 0x61, 0x4c, 0xae, 0xa7, 0x67, 0x36, 0x93, 0xeb, 0x87, 0x0e, 0x29, 0xd7, 0x4b, 0x02, 0x38, 0xa1, - 0x85, 0xb6, 0x61, 0xaa, 0x69, 0x3e, 0x6b, 0xa7, 0xe2, 0xd5, 0x9e, 0xed, 0xe3, 0x59, 0xb9, 0x8e, - 0xf6, 0x22, 0xcd, 0x52, 0x9a, 0x1e, 0xee, 0x6e, 0x02, 0xbd, 0x0c, 0xc3, 0xef, 0x06, 0x11, 0xbb, - 0xb6, 0x10, 0x07, 0x97, 0x8c, 0x0b, 0x1a, 0x7e, 0xf5, 0x46, 0x83, 0x95, 0xef, 0xef, 0xce, 0x8d, - 0xd4, 0x03, 0x57, 0xfe, 0xc5, 0xaa, 0x02, 0xfa, 0xac, 0x05, 0x27, 0x8d, 0x7d, 0xac, 0x7a, 0x0e, - 0x87, 0xe9, 0xf9, 0xa3, 0xa2, 0xe5, 0x93, 0xab, 0x59, 0x34, 0x71, 0x76, 0x53, 0xf6, 0x77, 0xb9, - 0x89, 0x54, 0x18, 0x4d, 0x48, 0xd4, 0x69, 0x1d, 0xc7, 0xeb, 0x10, 0x37, 0x0c, 0x7b, 0xce, 0x03, - 0x30, 0xd2, 0xff, 0x7b, 0x8b, 0x19, 0xe9, 0xd7, 0xc8, 0x56, 0xbb, 0xe5, 0xc4, 0xc7, 0xe1, 0xfb, - 0xfc, 0x19, 0x18, 0x8e, 0x45, 0x6b, 0xc5, 0x9e, 0xb6, 0xd0, 0xba, 0xc7, 0x2e, 0x2f, 0xd4, 0xc1, - 0x27, 0x4b, 0xb1, 0x22, 0x68, 0xff, 0x2b, 0x3e, 0x2b, 0x12, 0x72, 0x0c, 0x96, 0x88, 0xeb, 0xa6, - 0x25, 0xe2, 0xa9, 0xc2, 0xdf, 0xd2, 0xc3, 0x22, 0xf1, 0x1d, 0xf3, 0x0b, 0x98, 0x7e, 0xf2, 0x93, - 0x73, 0x8b, 0x64, 0xff, 0xba, 0x05, 0xd3, 0x59, 0xce, 0x08, 0x54, 0x80, 0xe1, 0xda, 0x91, 0xba, - 0x5f, 0x53, 0xa3, 0x7a, 0x4b, 0x94, 0x63, 0x85, 0x51, 0x38, 0xd7, 0x7c, 0x7f, 0x29, 0xb4, 0x6e, - 0x80, 0xf9, 0x40, 0x22, 0x7a, 0x85, 0x87, 0x3a, 0x58, 0xea, 0x05, 0xc3, 0xfe, 0xc2, 0x1c, 0xec, - 0x6f, 0x94, 0x60, 0x9a, 0x1b, 0xb9, 0x17, 0xb6, 0x03, 0xcf, 0xad, 0x07, 0xae, 0x08, 0xfc, 0x70, - 0x61, 0xb4, 0xad, 0x29, 0xb7, 0xc5, 0x52, 0xf2, 0xe8, 0xea, 0x70, 0xa2, 0x50, 0xe8, 0xa5, 0xd8, - 0xa0, 0x4a, 0x5b, 0x21, 0xdb, 0x5e, 0x53, 0xd9, 0x4c, 0x4b, 0x7d, 0x9f, 0x0c, 0xaa, 0x95, 0x65, - 0x8d, 0x0e, 0x36, 0xa8, 0x1e, 0xc1, 0x13, 0x31, 0xf6, 0xdf, 0xb7, 0xe0, 0xa1, 0x1e, 0x69, 0x7b, - 0x68, 0x73, 0xf7, 0xd8, 0xc5, 0x82, 0x78, 0x81, 0x53, 0x35, 0xc7, 0xaf, 
0x1b, 0xb0, 0x80, 0xa2, - 0x3b, 0x00, 0xfc, 0xba, 0x80, 0xca, 0xd2, 0xe9, 0xbb, 0xec, 0x82, 0xc9, 0x31, 0xb4, 0xbc, 0x09, - 0x92, 0x12, 0xd6, 0xa8, 0xda, 0x5f, 0x2b, 0xc3, 0x00, 0x7f, 0xe8, 0xbd, 0x0e, 0x43, 0x9b, 0x3c, - 0x9f, 0x71, 0x7f, 0xe9, 0x94, 0x13, 0xe5, 0x85, 0x17, 0x60, 0x49, 0x06, 0x5d, 0x83, 0x13, 0x22, - 0xf4, 0xa8, 0x46, 0x5a, 0xce, 0x8e, 0xd4, 0x86, 0xf9, 0xbb, 0x21, 0x32, 0xc1, 0xfd, 0x89, 0xd5, - 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd2, 0x95, 0x7e, 0x90, 0xe7, 0x89, 0x56, 0x92, 0x70, 0x4e, - 0x0a, 0xc2, 0x97, 0x61, 0xac, 0xdd, 0xa5, 0xf7, 0x6b, 0xef, 0x69, 0x9b, 0xba, 0xbe, 0x89, 0xcb, - 0x7c, 0x17, 0x3a, 0xcc, 0x67, 0x63, 0x6d, 0x33, 0x24, 0xd1, 0x66, 0xd0, 0x72, 0xc5, 0x53, 0xb0, - 0x89, 0xef, 0x42, 0x0a, 0x8e, 0xbb, 0x6a, 0x50, 0x2a, 0xeb, 0x8e, 0xd7, 0xea, 0x84, 0x24, 0xa1, - 0x32, 0x68, 0x52, 0x59, 0x49, 0xc1, 0x71, 0x57, 0x0d, 0xba, 0xb6, 0x4e, 0x8a, 0xd7, 0x43, 0x65, - 0x90, 0xba, 0x60, 0x41, 0x9f, 0x86, 0x21, 0x19, 0x40, 0x50, 0x28, 0x97, 0x8a, 0x70, 0x4c, 0x50, - 0x2f, 0x91, 0x6a, 0xef, 0xc8, 0x89, 0xd0, 0x01, 0x49, 0xef, 0x30, 0xaf, 0x54, 0xfe, 0xb9, 0x05, - 0x27, 0x32, 0x1c, 0xe1, 0x38, 0x4b, 0xdb, 0xf0, 0xa2, 0x58, 0xbd, 0x62, 0xa1, 0xb1, 0x34, 0x5e, - 0x8e, 0x15, 0x06, 0xdd, 0x2d, 0x9c, 0x69, 0xa6, 0x19, 0xa5, 0x70, 0x31, 0x11, 0xd0, 0xfe, 0x18, - 0x25, 0x3a, 0x03, 0x95, 0x4e, 0x44, 0x42, 0xf9, 0xa0, 0xa3, 0xe4, 0xf3, 0xcc, 0xce, 0xc8, 0x20, - 0x54, 0x6c, 0xdd, 0x50, 0x26, 0x3e, 0x4d, 0x6c, 0xe5, 0x46, 0x3e, 0x0e, 0xb3, 0xbf, 0x5c, 0x86, - 0x89, 0x94, 0x43, 0x2c, 0xed, 0xc8, 0x56, 0xe0, 0x7b, 0x71, 0xa0, 0xf2, 0xdb, 0xf1, 0x37, 0xe4, - 0x48, 0x7b, 0xf3, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x29, 0x5f, 0x09, 0x4e, 0xbf, 0xce, 0xb1, - 0x58, 0x33, 0x1e, 0x0a, 0x2e, 0xfa, 0xb2, 0xce, 0xe3, 0x50, 0x69, 0x07, 0xea, 0xd1, 0x77, 0x35, - 0x9f, 0x78, 0xb1, 0x56, 0x0f, 0x82, 0x16, 0x66, 0x40, 0xf4, 0x84, 0xf8, 0xfa, 0xd4, 0xcd, 0x08, - 0x76, 0xdc, 0x20, 0xd2, 0x86, 0xe0, 0x29, 0x18, 0xba, 0x4b, 0x76, 0x42, 0xcf, 0xdf, 0x48, 0xdf, - 0x0b, 0x5d, 
0xe1, 0xc5, 0x58, 0xc2, 0xcd, 0x64, 0xf5, 0x43, 0x47, 0xfc, 0x7a, 0xce, 0x70, 0xee, - 0x39, 0xf8, 0x4d, 0x0b, 0x26, 0x58, 0xf6, 0x59, 0x91, 0x22, 0xc1, 0x0b, 0xfc, 0x63, 0x90, 0x31, - 0x1e, 0x87, 0x81, 0x90, 0x36, 0x9a, 0x7e, 0xfe, 0x82, 0xf5, 0x04, 0x73, 0x18, 0x7a, 0x04, 0x2a, - 0xac, 0x0b, 0x74, 0x1a, 0x47, 0x79, 0x92, 0xfb, 0x9a, 0x13, 0x3b, 0x98, 0x95, 0xb2, 0x18, 0x34, - 0x4c, 0xda, 0x2d, 0x8f, 0x77, 0x3a, 0x31, 0xe7, 0x7e, 0xd0, 0x62, 0xd0, 0x32, 0x3b, 0xf9, 0xa0, - 0x62, 0xd0, 0xb2, 0x89, 0x1f, 0x2c, 0xe7, 0xff, 0xf7, 0x12, 0x9c, 0xce, 0xac, 0x97, 0xdc, 0x30, - 0xaf, 0x18, 0x37, 0xcc, 0x17, 0x53, 0x37, 0xcc, 0xf6, 0xc1, 0xb5, 0x1f, 0xcc, 0x9d, 0x73, 0xf6, - 0x55, 0x70, 0xf9, 0x18, 0xaf, 0x82, 0x2b, 0x45, 0x45, 0x9c, 0x81, 0x1c, 0x11, 0xe7, 0x8f, 0x2c, - 0x78, 0x38, 0x73, 0xc8, 0x3e, 0x70, 0x41, 0x7f, 0x99, 0xbd, 0xec, 0xa1, 0x9d, 0xfc, 0x5a, 0xb9, - 0xc7, 0x57, 0x31, 0x3d, 0xe5, 0x2c, 0xe5, 0x42, 0x0c, 0x18, 0x09, 0xe1, 0x6d, 0x94, 0x73, 0x20, - 0x5e, 0x86, 0x15, 0x14, 0x45, 0x5a, 0xd0, 0x1c, 0xef, 0xe4, 0xf2, 0x21, 0x37, 0xd4, 0xbc, 0x69, - 0x87, 0xd7, 0xf3, 0x3e, 0xa4, 0x43, 0xe9, 0x6e, 0x6b, 0x9a, 0x67, 0xf9, 0x30, 0x9a, 0xe7, 0x68, - 0xb6, 0xd6, 0x89, 0x16, 0x60, 0x62, 0xcb, 0xf3, 0xd9, 0xa3, 0xbb, 0xa6, 0xf4, 0xa4, 0x22, 0x97, - 0xaf, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7d, 0x19, 0xc6, 0x0e, 0x6f, 0x5d, 0xfb, 0x51, 0x19, 0x3e, - 0x7c, 0x00, 0x53, 0xe0, 0xa7, 0x83, 0x31, 0x2f, 0xda, 0xe9, 0xd0, 0x35, 0x37, 0x75, 0x98, 0x5e, - 0xef, 0xb4, 0x5a, 0x3b, 0xcc, 0x3f, 0x8b, 0xb8, 0x12, 0x43, 0x08, 0x35, 0x2a, 0x19, 0xf5, 0x4a, - 0x06, 0x0e, 0xce, 0xac, 0x89, 0x7e, 0x1e, 0x50, 0x70, 0x87, 0xa5, 0x45, 0x76, 0x93, 0xbc, 0x16, - 0x6c, 0x0a, 0xca, 0xc9, 0x56, 0xbd, 0xd1, 0x85, 0x81, 0x33, 0x6a, 0x51, 0x39, 0x95, 0x9e, 0x63, - 0x3b, 0xaa, 0x5b, 0x29, 0x39, 0x15, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x4b, 0x30, 0xe5, 0x6c, 0x3b, - 0x1e, 0x4f, 0x73, 0x26, 0x09, 0x70, 0x41, 0x55, 0xd9, 0xaf, 0x16, 0xd2, 0x08, 0xb8, 0xbb, 0x0e, - 0x6a, 0x1b, 0x06, 0x49, 0xfe, 0x32, 0xc3, 0x27, 
0x0f, 0xb1, 0x82, 0x0b, 0x9b, 0x28, 0xed, 0x3f, - 0xb5, 0xe8, 0xd1, 0x97, 0xf1, 0x3e, 0x2b, 0x1d, 0x11, 0x65, 0x60, 0xd3, 0x82, 0x00, 0xd5, 0x88, - 0x2c, 0xe9, 0x40, 0x6c, 0xe2, 0xf2, 0xa5, 0x11, 0x25, 0xee, 0xe2, 0x86, 0xb4, 0x29, 0xe2, 0x67, - 0x15, 0x06, 0x95, 0xa0, 0x5d, 0x6f, 0xdb, 0x8b, 0x82, 0x50, 0x6c, 0xa0, 0x7e, 0x5f, 0x41, 0x57, - 0xfc, 0xb2, 0xc6, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x4a, 0x09, 0xc6, 0x64, 0x8b, 0xaf, 0x76, 0x82, - 0xd8, 0x39, 0x86, 0x23, 0xfd, 0x55, 0xe3, 0x48, 0x3f, 0x5f, 0x2c, 0x9c, 0x98, 0x75, 0xae, 0xe7, - 0x51, 0xfe, 0xe9, 0xd4, 0x51, 0x7e, 0xa1, 0x1f, 0xa2, 0x07, 0x1f, 0xe1, 0xff, 0xc6, 0x82, 0x29, - 0x03, 0xff, 0x18, 0x4e, 0x92, 0xba, 0x79, 0x92, 0x3c, 0xd3, 0xc7, 0xd7, 0xf4, 0x38, 0x41, 0xbe, - 0x5e, 0x4a, 0x7d, 0x05, 0x3b, 0x39, 0x7e, 0x01, 0x2a, 0x9b, 0x4e, 0xe8, 0x16, 0xcb, 0xf9, 0xd9, - 0x55, 0x7d, 0xfe, 0xb2, 0x13, 0xba, 0x9c, 0xff, 0x9f, 0x53, 0xaf, 0xc7, 0x39, 0xa1, 0x9b, 0x1b, - 0x45, 0xc1, 0x1a, 0x45, 0x2f, 0xc1, 0x60, 0xd4, 0x0c, 0xda, 0xca, 0xcf, 0xf4, 0x0c, 0x7f, 0x59, - 0x8e, 0x96, 0xec, 0xef, 0xce, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0xfe, 0xec, 0x06, 0x54, 0x55, - 0xd3, 0x47, 0xea, 0x69, 0xff, 0x5f, 0xcb, 0x70, 0x22, 0x63, 0xad, 0xa0, 0x5f, 0x34, 0xc6, 0xed, - 0xe5, 0xbe, 0x17, 0xdb, 0xfb, 0x1c, 0xb9, 0x5f, 0x64, 0x9a, 0x92, 0x2b, 0x56, 0xc7, 0x21, 0x9a, - 0xbf, 0x19, 0x91, 0x74, 0xf3, 0xb4, 0x28, 0xbf, 0x79, 0xda, 0xec, 0xb1, 0x0d, 0x3f, 0x6d, 0x48, - 0xf5, 0xf4, 0x48, 0xe7, 0xf9, 0x0b, 0x15, 0x98, 0xce, 0xca, 0x5b, 0x80, 0x7e, 0xc5, 0x4a, 0xbd, - 0x30, 0xf2, 0x4a, 0xff, 0xc9, 0x0f, 0xf8, 0xb3, 0x23, 0x22, 0xab, 0xd0, 0xbc, 0xf9, 0xe6, 0x48, - 0xee, 0x88, 0x8b, 0xd6, 0x59, 0xfc, 0x53, 0xc8, 0x5f, 0x8b, 0x91, 0x5c, 0xe1, 0x53, 0x87, 0xe8, - 0x8a, 0x78, 0x70, 0x26, 0x4a, 0xc5, 0x3f, 0xc9, 0xe2, 0xfc, 0xf8, 0x27, 0xd9, 0x87, 0x59, 0x0f, - 0x46, 0xb4, 0xef, 0x3a, 0xd2, 0x65, 0x70, 0x97, 0x1e, 0x51, 0x5a, 0xbf, 0x8f, 0x74, 0x29, 0xfc, - 0x1d, 0x0b, 0x52, 0x4e, 0x61, 0xca, 0x2c, 0x63, 0xf5, 0x34, 0xcb, 0x9c, 0x81, 0x4a, 
0x18, 0xb4, - 0x48, 0xfa, 0xd1, 0x09, 0x1c, 0xb4, 0x08, 0x66, 0x10, 0xf5, 0xa0, 0x74, 0xb9, 0xd7, 0x83, 0xd2, - 0x54, 0x4f, 0x6f, 0x91, 0x6d, 0x22, 0x8d, 0x24, 0x8a, 0x8d, 0x5f, 0xa5, 0x85, 0x98, 0xc3, 0xec, - 0xdf, 0xa9, 0xc0, 0x89, 0x8c, 0x58, 0x40, 0xaa, 0x21, 0x6d, 0x38, 0x31, 0xb9, 0xe7, 0xec, 0xa4, - 0x93, 0xdf, 0x5e, 0xe2, 0xc5, 0x58, 0xc2, 0x99, 0x33, 0x2b, 0x4f, 0xa0, 0x97, 0x32, 0x5d, 0x89, - 0xbc, 0x79, 0x02, 0x7a, 0xf4, 0x4f, 0x0f, 0x5f, 0x04, 0x88, 0xa2, 0xd6, 0xb2, 0x4f, 0x25, 0x3c, - 0x57, 0x38, 0xcd, 0x26, 0x79, 0x17, 0x1b, 0x57, 0x05, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0x93, 0xed, - 0x30, 0x88, 0xb9, 0x61, 0xb0, 0xc6, 0x1d, 0x2d, 0x06, 0xcc, 0x68, 0xad, 0x7a, 0x0a, 0x8e, 0xbb, - 0x6a, 0xa0, 0x17, 0x60, 0x44, 0x44, 0x70, 0xd5, 0x83, 0xa0, 0x25, 0xcc, 0x48, 0xea, 0x3a, 0xbe, - 0x91, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0xd6, 0xc6, 0xa1, 0xcc, 0x6a, 0xdc, 0xe2, 0xa8, 0xe1, - 0xa5, 0xb2, 0x9b, 0x0c, 0x17, 0xca, 0x6e, 0x92, 0x18, 0xd6, 0xaa, 0x85, 0x2f, 0x62, 0x20, 0xd7, - 0x00, 0xf5, 0x87, 0x65, 0x18, 0xe4, 0x53, 0x71, 0x0c, 0x52, 0x5e, 0x5d, 0x98, 0x94, 0x0a, 0x65, - 0x92, 0xe0, 0xbd, 0x9a, 0xaf, 0x39, 0xb1, 0xc3, 0x59, 0x93, 0xda, 0x21, 0x89, 0x19, 0x0a, 0xcd, - 0x1b, 0x7b, 0x68, 0x36, 0x65, 0x29, 0x01, 0x4e, 0x43, 0xdb, 0x51, 0x9b, 0x00, 0x11, 0x7b, 0xfe, - 0x96, 0xd2, 0x10, 0x99, 0x79, 0x9f, 0x2f, 0xd4, 0x8f, 0x86, 0xaa, 0xc6, 0x7b, 0x93, 0x2c, 0x4b, - 0x05, 0xc0, 0x1a, 0xed, 0xd9, 0x17, 0xa1, 0xaa, 0x90, 0xf3, 0x54, 0xc8, 0x51, 0x9d, 0xb5, 0xfd, - 0x2c, 0x4c, 0xa4, 0xda, 0xea, 0x4b, 0x03, 0xfd, 0x3d, 0x0b, 0x26, 0x78, 0x97, 0x97, 0xfd, 0x6d, - 0xc1, 0x0a, 0x3e, 0x67, 0xc1, 0x74, 0x2b, 0x63, 0x27, 0x8a, 0x69, 0x3e, 0xcc, 0x1e, 0x56, 0xca, - 0x67, 0x16, 0x14, 0x67, 0xb6, 0x86, 0xce, 0xc2, 0x30, 0x7f, 0xcd, 0xdb, 0x69, 0x09, 0x0f, 0xed, - 0x51, 0x9e, 0x93, 0x9c, 0x97, 0x61, 0x05, 0xb5, 0x7f, 0x6c, 0xc1, 0x14, 0xff, 0x88, 0x2b, 0x64, - 0x47, 0xa9, 0x57, 0x1f, 0x90, 0xcf, 0x10, 0xd9, 0xd7, 0x4b, 0x3d, 0xb2, 0xaf, 0xeb, 0x5f, 0x59, - 0x3e, 0xf0, 0x2b, 0xbf, 
0x61, 0x81, 0x58, 0xa1, 0xc7, 0xa0, 0x3f, 0xac, 0x9a, 0xfa, 0xc3, 0x47, - 0x8a, 0x2c, 0xfa, 0x1e, 0x8a, 0xc3, 0xaf, 0x96, 0x60, 0x92, 0x23, 0x24, 0x37, 0x32, 0x1f, 0x94, - 0xc9, 0xe9, 0xef, 0x55, 0x20, 0xf5, 0x26, 0x6c, 0xf6, 0x97, 0x1a, 0x73, 0x59, 0x39, 0x70, 0x2e, - 0xff, 0xa7, 0x05, 0x88, 0x8f, 0x49, 0xfa, 0x29, 0x74, 0x7e, 0xba, 0x69, 0xe6, 0x80, 0x84, 0x73, - 0x28, 0x08, 0xd6, 0xb0, 0x1e, 0xf0, 0x27, 0xa4, 0xee, 0xc3, 0xca, 0xf9, 0xf7, 0x61, 0x7d, 0x7c, - 0xf5, 0x77, 0xcb, 0x90, 0x76, 0xd5, 0x44, 0x6f, 0xc3, 0x68, 0xd3, 0x69, 0x3b, 0x77, 0xbc, 0x96, - 0x17, 0x7b, 0x24, 0x2a, 0x76, 0xe1, 0xbe, 0xa4, 0xd5, 0x10, 0xd7, 0x50, 0x5a, 0x09, 0x36, 0x28, - 0xa2, 0x79, 0x80, 0x76, 0xe8, 0x6d, 0x7b, 0x2d, 0xb2, 0xc1, 0x34, 0x1e, 0x16, 0xeb, 0xc1, 0xef, - 0x8e, 0x65, 0x29, 0xd6, 0x30, 0x32, 0x62, 0x03, 0xca, 0xc7, 0x11, 0x1b, 0x50, 0xe9, 0x33, 0x36, - 0x60, 0xa0, 0x50, 0x6c, 0x00, 0x86, 0x53, 0xf2, 0xf0, 0xa6, 0xff, 0x57, 0xbc, 0x16, 0x11, 0xb2, - 0x1b, 0x8f, 0x05, 0x99, 0xdd, 0xdb, 0x9d, 0x3b, 0x85, 0x33, 0x31, 0x70, 0x8f, 0x9a, 0x76, 0x07, - 0x4e, 0x34, 0x48, 0x28, 0x9f, 0xb1, 0x53, 0x7b, 0xe9, 0x4d, 0xa8, 0x86, 0xa9, 0x6d, 0xdc, 0x67, - 0xc0, 0xbf, 0x96, 0xe3, 0x4d, 0x6e, 0xdb, 0x84, 0xa4, 0xfd, 0xd7, 0x4b, 0x30, 0x24, 0x9c, 0x34, - 0x8f, 0x41, 0xf8, 0xb8, 0x62, 0x98, 0x98, 0x9e, 0xca, 0xe3, 0x7f, 0xac, 0x5b, 0x3d, 0x8d, 0x4b, - 0x8d, 0x94, 0x71, 0xe9, 0x99, 0x62, 0xe4, 0x0e, 0x36, 0x2b, 0xfd, 0x93, 0x32, 0x8c, 0x9b, 0x4e, - 0xab, 0xc7, 0x30, 0x2c, 0xaf, 0xc1, 0x50, 0x24, 0xfc, 0xa7, 0x4b, 0x45, 0x7c, 0xf6, 0xd2, 0x53, - 0x9c, 0xdc, 0xc4, 0x0b, 0x8f, 0x69, 0x49, 0x2e, 0xd3, 0x45, 0xbb, 0x7c, 0x2c, 0x2e, 0xda, 0x79, - 0xbe, 0xc4, 0x95, 0x07, 0xe1, 0x4b, 0x6c, 0x7f, 0x8f, 0xb1, 0x7c, 0xbd, 0xfc, 0x18, 0x8e, 0xf1, - 0x57, 0xcd, 0xc3, 0xe1, 0x5c, 0xa1, 0x75, 0x27, 0xba, 0xd7, 0xe3, 0x38, 0xff, 0x96, 0x05, 0x23, - 0x02, 0xf1, 0x18, 0x3e, 0xe0, 0xe7, 0xcd, 0x0f, 0x78, 0xa2, 0xd0, 0x07, 0xf4, 0xe8, 0xf9, 0x57, - 0x4a, 0xaa, 0xe7, 0xf5, 0x20, 0x8c, 0x0b, 0x65, 0x42, 0x1f, 
0xa6, 0xaa, 0x5f, 0xd0, 0x0c, 0x5a, - 0x42, 0x80, 0x7b, 0x24, 0x09, 0xfd, 0xe3, 0xe5, 0xfb, 0xda, 0x6f, 0xac, 0xb0, 0x59, 0x64, 0x5a, - 0x10, 0xc6, 0xe2, 0x00, 0x4d, 0x22, 0xd3, 0x82, 0x30, 0xc6, 0x0c, 0x82, 0x5c, 0x80, 0xd8, 0x09, - 0x37, 0x48, 0x4c, 0xcb, 0x44, 0xd4, 0x6c, 0xef, 0xdd, 0xda, 0x89, 0xbd, 0xd6, 0xbc, 0xe7, 0xc7, - 0x51, 0x1c, 0xce, 0xaf, 0xfa, 0xf1, 0x8d, 0x90, 0x0b, 0xfd, 0x5a, 0x2c, 0x9f, 0xa2, 0x85, 0x35, - 0xba, 0x32, 0x48, 0x84, 0xb5, 0x31, 0x60, 0xde, 0x20, 0x5d, 0x17, 0xe5, 0x58, 0x61, 0xd8, 0x2f, - 0x32, 0xce, 0xce, 0x06, 0xa8, 0xbf, 0x30, 0xbb, 0x2f, 0x0c, 0xa9, 0xa1, 0x65, 0x66, 0xe1, 0xeb, - 0x7a, 0x30, 0x5f, 0x51, 0xf6, 0x49, 0xbb, 0xa0, 0xfb, 0x51, 0x27, 0xb1, 0x7f, 0x88, 0x74, 0x5d, - 0x3b, 0xbe, 0x58, 0x98, 0x23, 0xf7, 0x71, 0xd1, 0xc8, 0x52, 0x32, 0xb2, 0x3c, 0x74, 0xab, 0xf5, - 0x74, 0xfe, 0xfa, 0x25, 0x09, 0xc0, 0x09, 0x0e, 0x3a, 0x2f, 0x14, 0x4a, 0x6e, 0x71, 0xf9, 0x70, - 0x4a, 0xa1, 0x94, 0x43, 0xa2, 0x69, 0x94, 0x17, 0x60, 0x44, 0x3d, 0x09, 0x54, 0xe7, 0x8f, 0xb1, - 0x54, 0xb9, 0x7c, 0xb5, 0x9c, 0x14, 0x63, 0x1d, 0x07, 0xad, 0xc1, 0x44, 0xc4, 0xdf, 0x2b, 0x92, - 0xd1, 0x1a, 0xc2, 0x70, 0xf0, 0xb4, 0xbc, 0xa4, 0x6c, 0x98, 0xe0, 0x7d, 0x56, 0xc4, 0xb7, 0xb2, - 0x8c, 0xef, 0x48, 0x93, 0x40, 0xaf, 0xc0, 0x78, 0x4b, 0x7f, 0xc3, 0xb5, 0x2e, 0xec, 0x0a, 0xca, - 0xed, 0xcc, 0x78, 0xe1, 0xb5, 0x8e, 0x53, 0xd8, 0xe8, 0x35, 0x98, 0xd1, 0x4b, 0x44, 0x72, 0x21, - 0xc7, 0xdf, 0x20, 0x91, 0x78, 0xdb, 0xe4, 0x91, 0xbd, 0xdd, 0xb9, 0x99, 0xab, 0x3d, 0x70, 0x70, - 0xcf, 0xda, 0xe8, 0x25, 0x18, 0x95, 0x9f, 0xaf, 0xc5, 0x36, 0x25, 0x0e, 0x8f, 0x1a, 0x0c, 0x1b, - 0x98, 0xe8, 0x1e, 0x9c, 0x94, 0xff, 0xd7, 0x42, 0x67, 0x7d, 0xdd, 0x6b, 0x8a, 0x20, 0xb3, 0x11, - 0x46, 0x62, 0x41, 0xfa, 0x8b, 0x2f, 0x67, 0x21, 0xed, 0xef, 0xce, 0x9d, 0x11, 0xa3, 0x96, 0x09, - 0x67, 0x93, 0x98, 0x4d, 0x1f, 0x5d, 0x83, 0x13, 0x9b, 0xc4, 0x69, 0xc5, 0x9b, 0x4b, 0x9b, 0xa4, - 0x79, 0x57, 0x6e, 0x2c, 0x16, 0x31, 0xa5, 0xb9, 0x04, 0x5e, 0xee, 0x46, 0xc1, 0x59, 0xf5, 0xde, - 
0xdf, 0x9d, 0xf2, 0x2f, 0xd0, 0xca, 0x9a, 0xfc, 0x80, 0xde, 0x81, 0x51, 0x7d, 0xac, 0xd3, 0x82, - 0x41, 0xfe, 0xfb, 0xbe, 0x42, 0x0e, 0x51, 0x33, 0xa0, 0xc3, 0xb0, 0x41, 0xdb, 0xfe, 0x77, 0x25, - 0x98, 0xcb, 0xc9, 0xdd, 0x95, 0xb2, 0x66, 0x59, 0x85, 0xac, 0x59, 0x0b, 0xf2, 0xcd, 0x9b, 0xeb, - 0xa9, 0x9c, 0xe9, 0xa9, 0x57, 0x6c, 0x92, 0xcc, 0xe9, 0x69, 0xfc, 0xc2, 0x9e, 0x66, 0xba, 0x41, - 0xac, 0x92, 0xeb, 0x70, 0xf7, 0xba, 0x6e, 0xe3, 0x1c, 0x38, 0x8c, 0xd0, 0xdb, 0xd3, 0xbc, 0x69, - 0x7f, 0xaf, 0x04, 0x27, 0xd5, 0x60, 0xfe, 0xf4, 0x0e, 0xe1, 0x5b, 0xdd, 0x43, 0xf8, 0x40, 0xcd, - 0xc4, 0xf6, 0x0d, 0x18, 0x6c, 0xec, 0x44, 0xcd, 0xb8, 0x55, 0xe0, 0xc4, 0x7f, 0xdc, 0xd8, 0x57, - 0xc9, 0x69, 0xc4, 0x5e, 0xb2, 0x13, 0xdb, 0xcc, 0xfe, 0xbc, 0x05, 0x13, 0x6b, 0x4b, 0xf5, 0x46, - 0xd0, 0xbc, 0x4b, 0xe2, 0x05, 0x6e, 0xd0, 0xc0, 0xe2, 0xc0, 0xb7, 0x0e, 0x79, 0x90, 0x67, 0x89, - 0x08, 0x67, 0xa0, 0xb2, 0x19, 0x44, 0x71, 0xfa, 0x52, 0xe0, 0x72, 0x10, 0xc5, 0x98, 0x41, 0xec, - 0x3f, 0xb3, 0x60, 0x80, 0x3d, 0xd4, 0x96, 0xf7, 0xc8, 0x5f, 0x91, 0xef, 0x42, 0x2f, 0xc0, 0x20, - 0x59, 0x5f, 0x27, 0xcd, 0x58, 0xcc, 0xaf, 0x0c, 0xb0, 0x19, 0x5c, 0x66, 0xa5, 0xf4, 0x44, 0x63, - 0x8d, 0xf1, 0xbf, 0x58, 0x20, 0xa3, 0xcf, 0x40, 0x35, 0xf6, 0xb6, 0xc8, 0x82, 0xeb, 0x0a, 0x2b, - 0x7c, 0x7f, 0x3e, 0x5f, 0xea, 0x84, 0x5d, 0x93, 0x44, 0x70, 0x42, 0xcf, 0xfe, 0x52, 0x09, 0x20, - 0x09, 0x9f, 0xcb, 0xfb, 0xcc, 0xc5, 0xae, 0xb7, 0x0c, 0x9f, 0xcc, 0x78, 0xcb, 0x10, 0x25, 0x04, - 0x33, 0x5e, 0x32, 0x54, 0x43, 0x55, 0x2e, 0x34, 0x54, 0x95, 0x7e, 0x86, 0x6a, 0x09, 0xa6, 0x92, - 0xf0, 0x3f, 0x33, 0x8e, 0x9a, 0xe5, 0x1b, 0x5e, 0x4b, 0x03, 0x71, 0x37, 0xbe, 0xfd, 0x25, 0x0b, - 0x84, 0x97, 0x70, 0x81, 0x05, 0xed, 0xca, 0x77, 0xc7, 0x8c, 0xd4, 0x82, 0x4f, 0x17, 0x71, 0xa0, - 0x16, 0x09, 0x05, 0x15, 0xdf, 0x37, 0xd2, 0x08, 0x1a, 0x54, 0xed, 0xdf, 0xb6, 0x60, 0x84, 0x83, - 0xaf, 0x31, 0x45, 0x34, 0xbf, 0x5f, 0x7d, 0x25, 0xb3, 0x66, 0x4f, 0x72, 0x51, 0xc2, 0x2a, 0xa9, - 0xb1, 0xfe, 0x24, 0x97, 0x04, 0xe0, 
0x04, 0x07, 0x3d, 0x05, 0x43, 0x51, 0xe7, 0x0e, 0x43, 0x4f, - 0xb9, 0x0c, 0x37, 0x78, 0x31, 0x96, 0x70, 0xfb, 0x9f, 0x95, 0x60, 0x32, 0xed, 0x31, 0x8e, 0x30, - 0x0c, 0x72, 0x06, 0x92, 0xd6, 0x69, 0x0e, 0x32, 0x80, 0x6a, 0x1e, 0xe7, 0xc0, 0x1f, 0x96, 0x67, - 0x2c, 0x48, 0x50, 0x42, 0xeb, 0x30, 0xe2, 0x06, 0xf7, 0xfc, 0x7b, 0x4e, 0xe8, 0x2e, 0xd4, 0x57, - 0xc5, 0x4c, 0xe4, 0xf8, 0xf8, 0xd5, 0x92, 0x0a, 0xba, 0x3f, 0x3b, 0x33, 0xc8, 0x25, 0x20, 0xac, - 0x13, 0x46, 0x6f, 0xb2, 0x4c, 0x28, 0xeb, 0xde, 0xc6, 0x35, 0xa7, 0x5d, 0xcc, 0x9b, 0x65, 0x49, - 0xa2, 0x6b, 0x6d, 0x8c, 0x89, 0xc4, 0x29, 0x1c, 0x80, 0x13, 0x92, 0xf6, 0xaf, 0x9e, 0x04, 0x63, - 0x2d, 0x18, 0x19, 0xa7, 0xad, 0x07, 0x9e, 0x71, 0xfa, 0x0d, 0x18, 0x26, 0x5b, 0xed, 0x78, 0xa7, - 0xe6, 0x85, 0xc5, 0xde, 0x0f, 0x58, 0x16, 0xd8, 0xdd, 0xd4, 0x25, 0x04, 0x2b, 0x8a, 0x3d, 0xf2, - 0x87, 0x97, 0x3f, 0x10, 0xf9, 0xc3, 0x2b, 0x7f, 0x29, 0xf9, 0xc3, 0x5f, 0x83, 0xa1, 0x0d, 0x2f, - 0xc6, 0xa4, 0x1d, 0x88, 0xd3, 0x38, 0x67, 0xf1, 0x5c, 0xe2, 0xc8, 0xdd, 0x99, 0x65, 0x05, 0x00, - 0x4b, 0x72, 0x68, 0x4d, 0x6d, 0xaa, 0xc1, 0x22, 0x32, 0x68, 0xb7, 0x81, 0x3c, 0x73, 0x5b, 0x89, - 0x7c, 0xe1, 0x43, 0xef, 0x3f, 0x5f, 0xb8, 0xca, 0xf2, 0x3d, 0xfc, 0xa0, 0xb2, 0x7c, 0x1b, 0xd9, - 0xd2, 0xab, 0x47, 0x91, 0x2d, 0xfd, 0x4b, 0x16, 0x9c, 0x6c, 0x67, 0xbd, 0x35, 0x20, 0xf2, 0x75, - 0xff, 0xdc, 0x21, 0x5e, 0x5f, 0x30, 0x9a, 0x66, 0xf9, 0x3d, 0x32, 0xd1, 0x70, 0x76, 0xc3, 0x32, - 0xed, 0xfa, 0xc8, 0xfb, 0x4f, 0xbb, 0x7e, 0xd4, 0x89, 0xbd, 0x93, 0x24, 0xec, 0x63, 0x47, 0x92, - 0x84, 0x7d, 0xfc, 0x01, 0x26, 0x61, 0xd7, 0xd2, 0xa7, 0x4f, 0x3c, 0xd8, 0xf4, 0xe9, 0x9b, 0xe6, - 0xb9, 0xc4, 0xb3, 0x75, 0xbf, 0x50, 0xf8, 0x5c, 0x32, 0x5a, 0x38, 0xf8, 0x64, 0xe2, 0x89, 0xe4, - 0xa7, 0xde, 0x67, 0x22, 0x79, 0x23, 0x1d, 0x3b, 0x3a, 0x8a, 0x74, 0xec, 0x6f, 0xeb, 0x27, 0xe8, - 0x89, 0x22, 0x2d, 0xa8, 0x83, 0xb2, 0xbb, 0x85, 0xac, 0x33, 0xb4, 0x3b, 0xe1, 0xfb, 0xf4, 0x71, - 0x27, 0x7c, 0x3f, 0x79, 0x84, 0x09, 0xdf, 0x4f, 0x1d, 0x6b, 0xc2, 0xf7, 
0x87, 0x3e, 0x20, 0x09, - 0xdf, 0x67, 0x8e, 0x2b, 0xe1, 0xfb, 0xc3, 0x0f, 0x36, 0xe1, 0xfb, 0xdb, 0x50, 0x6d, 0xcb, 0xb8, - 0xcb, 0x99, 0xd9, 0x22, 0x53, 0x97, 0x19, 0xa6, 0xc9, 0xa7, 0x4e, 0x81, 0x70, 0x42, 0x94, 0xb6, - 0x90, 0x24, 0x80, 0xff, 0x70, 0x91, 0x16, 0x32, 0xed, 0x1e, 0x07, 0xa4, 0x7d, 0xff, 0x42, 0x09, - 0x4e, 0x1f, 0xbc, 0x3b, 0x12, 0xa3, 0x49, 0x3d, 0xb1, 0x65, 0xa7, 0x8c, 0x26, 0x4c, 0xf2, 0xd4, - 0xb0, 0x0a, 0x87, 0xb3, 0x5f, 0x82, 0x29, 0xe5, 0xe7, 0xd5, 0xf2, 0x9a, 0x3b, 0xda, 0x33, 0x54, - 0x2a, 0x3e, 0xa1, 0x91, 0x46, 0xc0, 0xdd, 0x75, 0xd0, 0x02, 0x4c, 0x18, 0x85, 0xab, 0x35, 0xa1, - 0xbf, 0x28, 0x2b, 0x4d, 0xc3, 0x04, 0xe3, 0x34, 0xbe, 0xfd, 0x75, 0x0b, 0x1e, 0xea, 0x91, 0xe1, - 0xb5, 0x70, 0x8c, 0x76, 0x1b, 0x26, 0xda, 0x66, 0xd5, 0xc2, 0x29, 0x1f, 0x8c, 0x8c, 0xb2, 0xaa, - 0xd7, 0x29, 0x00, 0x4e, 0x93, 0x5f, 0x3c, 0xfb, 0xfd, 0x1f, 0x9d, 0xfe, 0xd0, 0x0f, 0x7e, 0x74, - 0xfa, 0x43, 0x3f, 0xfc, 0xd1, 0xe9, 0x0f, 0xfd, 0xd2, 0xde, 0x69, 0xeb, 0xfb, 0x7b, 0xa7, 0xad, - 0x1f, 0xec, 0x9d, 0xb6, 0x7e, 0xb8, 0x77, 0xda, 0xfa, 0xf3, 0xbd, 0xd3, 0xd6, 0x97, 0x7e, 0x7c, - 0xfa, 0x43, 0xaf, 0x97, 0xb6, 0x2f, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x06, 0xe5, 0xd7, - 0x49, 0x99, 0xd0, 0x00, 0x00, + 0x1a, 0x0c, 0xad, 0xf3, 0xa8, 0x5a, 0x91, 0xc8, 0xf7, 0x42, 0x5e, 0xe8, 0x6f, 0x57, 0x08, 0x2e, + 0x0f, 0x49, 0x11, 0x08, 0x2c, 0xd9, 0xb1, 0x1c, 0x87, 0x2a, 0x4e, 0x58, 0x64, 0xf2, 0x9d, 0xef, + 0x2f, 0xae, 0x58, 0x9c, 0x7e, 0x15, 0x14, 0x6b, 0x1c, 0xe9, 0x9c, 0x77, 0xe4, 0x1b, 0x39, 0x2c, + 0x8b, 0x6f, 0xee, 0x9c, 0xcf, 0x7c, 0x52, 0x87, 0xcf, 0x79, 0x85, 0xc2, 0x09, 0x53, 0xd4, 0x81, + 0xb1, 0xed, 0xa8, 0xbd, 0x49, 0xe4, 0xd2, 0x67, 0xa9, 0x7d, 0x47, 0x2e, 0x7e, 0x32, 0x27, 0x5f, + 0xb3, 0x28, 0xe2, 0x85, 0x71, 0xc7, 0x69, 0x75, 0x49, 0x30, 0x96, 0x54, 0xee, 0x96, 0xce, 0x16, + 0x9b, 0xb5, 0xd0, 0x21, 0x79, 0xb7, 0x13, 0xdc, 0xd9, 0x89, 0x89, 0x48, 0xfd, 0x9b, 0x33, 0x24, + 0xaf, 0x72, 0xe2, 0xee, 0x21, 0x11, 0x08, 0x2c, 0xd9, 0xa9, 0x2e, 0x63, 0xd2, 
0x78, 0xb2, 0x70, + 0x97, 0x75, 0x7d, 0x43, 0xd2, 0x65, 0x4c, 0xfa, 0x26, 0x4c, 0x99, 0xd4, 0x6d, 0x6f, 0x06, 0x71, + 0xe0, 0xa7, 0x64, 0xff, 0x54, 0x11, 0xa9, 0x5b, 0xcf, 0x28, 0xd9, 0x2d, 0x75, 0xb3, 0xa8, 0x70, + 0x66, 0xad, 0xc8, 0x87, 0xf1, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xce, 0x43, 0x54, 0xe8, 0x8c, + 0x68, 0x94, 0x11, 0x75, 0x33, 0xcf, 0x63, 0x13, 0x83, 0x53, 0xdc, 0xe9, 0xd0, 0x45, 0x4d, 0xa7, + 0x45, 0x56, 0x6f, 0xcc, 0x9c, 0x28, 0x32, 0x74, 0x0d, 0x4e, 0xdc, 0x3d, 0x74, 0x02, 0x81, 0x25, + 0x3b, 0x2a, 0xeb, 0x58, 0x1e, 0x7b, 0x96, 0xc9, 0x38, 0x57, 0xd6, 0x75, 0x79, 0xe7, 0x72, 0x59, + 0xc7, 0xc0, 0x98, 0x33, 0x42, 0xef, 0x40, 0x55, 0x28, 0xb7, 0x41, 0x34, 0x73, 0x92, 0x71, 0xfd, + 0xd9, 0x9c, 0xd6, 0x72, 0xf2, 0x1b, 0x8d, 0xec, 0x5d, 0x5f, 0x44, 0xff, 0x49, 0x22, 0x9c, 0xb0, + 0xb7, 0x7f, 0x63, 0xb0, 0x5b, 0xed, 0x61, 0x07, 0x9b, 0xbf, 0xd1, 0x7d, 0x63, 0xfd, 0xa9, 0xfe, + 0xcf, 0xef, 0x0f, 0xf0, 0xee, 0xfa, 0x8b, 0x16, 0x9c, 0x6a, 0x67, 0x7e, 0x9e, 0x50, 0x1c, 0xfa, + 0x35, 0x03, 0xf0, 0xae, 0x51, 0xf9, 0xc5, 0xb3, 0xf1, 0xb8, 0x47, 0x9d, 0xe9, 0xa3, 0x40, 0xf9, + 0x7d, 0x1f, 0x05, 0x6e, 0xc3, 0x30, 0xd3, 0x5d, 0x93, 0xfc, 0x3e, 0x7d, 0xa6, 0xc2, 0x61, 0x2a, + 0xc8, 0x92, 0x60, 0x81, 0x15, 0x33, 0xda, 0x71, 0x8f, 0xa6, 0x3f, 0x02, 0x13, 0x86, 0x16, 0x59, + 0x2d, 0xf9, 0x39, 0x6b, 0x45, 0xf4, 0xc4, 0xa3, 0xf5, 0x83, 0x88, 0xf7, 0xf3, 0x08, 0xf0, 0xc1, + 0x95, 0xa1, 0x5a, 0xc6, 0x41, 0x6f, 0xd0, 0xbc, 0x9e, 0xca, 0x3f, 0xec, 0x1d, 0xef, 0x01, 0xe5, + 0x1f, 0x5a, 0x19, 0xfa, 0x34, 0x3f, 0x54, 0x7e, 0xd2, 0x3c, 0x54, 0x3e, 0x99, 0x3e, 0x54, 0x76, + 0x99, 0x92, 0x8c, 0xf3, 0x64, 0xf1, 0xac, 0xbc, 0x45, 0x13, 0x18, 0xd9, 0x2d, 0x38, 0x93, 0x27, + 0xac, 0x99, 0xcb, 0x9a, 0xab, 0x2e, 0x6b, 0x13, 0x97, 0x35, 0x77, 0xb5, 0x86, 0x19, 0xa6, 0x68, + 0x0e, 0x0c, 0xfb, 0x97, 0x4b, 0x50, 0xae, 0x07, 0xee, 0x31, 0x98, 0xc6, 0x2e, 0x19, 0xa6, 0xb1, + 0x27, 0x72, 0x1f, 0x89, 0xec, 0x69, 0x08, 0xbb, 0x91, 0x32, 0x84, 0xfd, 0x4c, 0x3e, 0xab, 0x83, + 0xcd, 0x5e, 0xdf, 
0x2e, 0x83, 0xfe, 0xcc, 0x25, 0xfa, 0x0f, 0x87, 0xf1, 0x64, 0x2e, 0x17, 0x7b, + 0xf9, 0x52, 0xd4, 0xc1, 0x3c, 0xde, 0x64, 0x20, 0xe6, 0x4f, 0xac, 0x43, 0xf3, 0x6d, 0xe2, 0x6d, + 0x6c, 0xc6, 0xc4, 0x4d, 0x7f, 0xd8, 0xf1, 0x39, 0x34, 0xff, 0x85, 0x05, 0x13, 0xa9, 0xda, 0x51, + 0x2b, 0x2b, 0x82, 0xeb, 0x90, 0xc6, 0xae, 0xa9, 0xdc, 0x90, 0xaf, 0x79, 0x00, 0x75, 0x67, 0x21, + 0x0d, 0x4a, 0x4c, 0xb7, 0x56, 0x97, 0x1a, 0x11, 0xd6, 0x28, 0xd0, 0x0b, 0x30, 0x12, 0x07, 0xed, + 0xa0, 0x15, 0x6c, 0xec, 0x5c, 0x21, 0x32, 0x3b, 0x8b, 0xba, 0x59, 0x5a, 0x4b, 0x50, 0x58, 0xa7, + 0xb3, 0xbf, 0x53, 0x86, 0xf4, 0x23, 0xa9, 0xff, 0x7f, 0x9e, 0xfe, 0xe4, 0xcc, 0xd3, 0x3f, 0xb6, + 0x60, 0x92, 0xd6, 0xce, 0x5c, 0x8c, 0xa4, 0xe3, 0xb1, 0x7a, 0x22, 0xc4, 0x3a, 0xe0, 0x89, 0x90, + 0x27, 0xa9, 0xb4, 0x73, 0x83, 0x4e, 0x2c, 0x4c, 0x60, 0x9a, 0x10, 0xa3, 0x50, 0x2c, 0xb0, 0x82, + 0x8e, 0x84, 0xa1, 0x88, 0xd0, 0xd2, 0xe9, 0x48, 0x18, 0x62, 0x81, 0x95, 0x2f, 0x88, 0x54, 0x7a, + 0xbc, 0x20, 0xc2, 0xf2, 0x9b, 0x09, 0xb7, 0x16, 0xa1, 0x56, 0x68, 0xf9, 0xcd, 0xa4, 0xbf, 0x4b, + 0x42, 0x63, 0x7f, 0xbd, 0x0c, 0xa3, 0xf5, 0xc0, 0x4d, 0x22, 0x0a, 0x9e, 0x37, 0x22, 0x0a, 0xce, + 0xa4, 0x22, 0x0a, 0x26, 0x75, 0xda, 0x07, 0x13, 0x50, 0x20, 0xf2, 0xe0, 0xb1, 0x37, 0x6e, 0x0e, + 0x19, 0x4c, 0x60, 0xe4, 0xc1, 0x53, 0x8c, 0xb0, 0xc9, 0xf7, 0xa7, 0x29, 0x88, 0xe0, 0x7f, 0x5b, + 0x30, 0x5e, 0x0f, 0x5c, 0x3a, 0x41, 0x7f, 0x9a, 0x66, 0xa3, 0x9e, 0x3d, 0x6f, 0xf0, 0x80, 0xec, + 0x79, 0xff, 0xcc, 0x82, 0xa1, 0x7a, 0xe0, 0x1e, 0x83, 0x79, 0x78, 0xc5, 0x34, 0x0f, 0x3f, 0x96, + 0x2b, 0x79, 0x7b, 0x58, 0x84, 0xbf, 0x59, 0x86, 0x31, 0xda, 0xe2, 0x60, 0x43, 0x8e, 0x97, 0xd1, + 0x37, 0x56, 0x81, 0xbe, 0xa1, 0x2a, 0x61, 0xd0, 0x6a, 0x05, 0xf7, 0xd2, 0x63, 0xb7, 0xc2, 0xa0, + 0x58, 0x60, 0xd1, 0x39, 0x18, 0x6e, 0x87, 0x64, 0xdb, 0x0b, 0x3a, 0x51, 0x3a, 0xda, 0xb3, 0x2e, + 0xe0, 0x58, 0x51, 0xa0, 0xe7, 0x61, 0x34, 0xf2, 0xfc, 0x26, 0x91, 0x4e, 0x2f, 0x15, 0xe6, 0xf4, + 0xc2, 0x13, 0x95, 0x6a, 0x70, 0x6c, 0x50, 0xa1, 0xdb, 
0x50, 0x65, 0xff, 0xd9, 0x0a, 0xea, 0xff, + 0x09, 0x10, 0x91, 0x9f, 0x5c, 0x30, 0xc0, 0x09, 0x2f, 0x74, 0x11, 0x20, 0x96, 0xee, 0x39, 0x91, + 0x08, 0x4b, 0x56, 0x7a, 0xa9, 0x72, 0xdc, 0x89, 0xb0, 0x46, 0x85, 0x9e, 0x81, 0x6a, 0xec, 0x78, + 0xad, 0xab, 0x9e, 0x4f, 0x22, 0xe1, 0xde, 0x24, 0x92, 0x8e, 0x0b, 0x20, 0x4e, 0xf0, 0x74, 0xbf, + 0x67, 0x41, 0xef, 0xfc, 0x79, 0xa1, 0x61, 0x46, 0xcd, 0xf6, 0xfb, 0xab, 0x0a, 0x8a, 0x35, 0x0a, + 0xfb, 0x25, 0x38, 0x59, 0x0f, 0xdc, 0x7a, 0x10, 0xc6, 0x2b, 0x41, 0x78, 0xcf, 0x09, 0x5d, 0x39, + 0x7e, 0x73, 0x32, 0xd7, 0x35, 0xdd, 0x93, 0x07, 0xb8, 0x15, 0xc1, 0xc8, 0x5d, 0xfd, 0x1c, 0xdb, + 0xf1, 0xfb, 0x0c, 0x55, 0xf9, 0x41, 0x09, 0x50, 0x9d, 0x39, 0x10, 0x19, 0xaf, 0x51, 0x6d, 0xc2, + 0x78, 0x44, 0xae, 0x7a, 0x7e, 0xe7, 0xbe, 0x60, 0x55, 0x2c, 0x36, 0xa8, 0xb1, 0xac, 0x97, 0xe1, + 0x76, 0x1a, 0x13, 0x86, 0x53, 0x7c, 0x69, 0x67, 0x86, 0x1d, 0x7f, 0x21, 0xba, 0x19, 0x91, 0x50, + 0xbc, 0xbe, 0xc4, 0x3a, 0x13, 0x4b, 0x20, 0x4e, 0xf0, 0x74, 0xf2, 0xb0, 0x3f, 0xd7, 0x03, 0x1f, + 0x07, 0x41, 0x2c, 0xa7, 0x1b, 0x7b, 0x8d, 0x43, 0x83, 0x63, 0x83, 0x0a, 0xad, 0x00, 0x8a, 0x3a, + 0xed, 0x76, 0x8b, 0xdd, 0x94, 0x3a, 0xad, 0x4b, 0x61, 0xd0, 0x69, 0x73, 0x3f, 0x72, 0xf1, 0x90, + 0x45, 0xa3, 0x0b, 0x8b, 0x33, 0x4a, 0x50, 0x61, 0xb1, 0x1e, 0xb1, 0xdf, 0x22, 0x02, 0x9e, 0x5b, + 0x5b, 0x1b, 0x0c, 0x84, 0x25, 0xce, 0xfe, 0x45, 0xb6, 0xc1, 0xb1, 0x67, 0x71, 0xe2, 0x4e, 0x48, + 0xd0, 0x16, 0x8c, 0xb5, 0xd9, 0x26, 0x16, 0x87, 0x41, 0xab, 0x45, 0xa4, 0x7e, 0x79, 0x38, 0x17, + 0x26, 0xfe, 0x10, 0x86, 0xce, 0x0e, 0x9b, 0xdc, 0xed, 0xff, 0x36, 0xce, 0x64, 0x95, 0xb8, 0xac, + 0x1e, 0x12, 0xce, 0xca, 0x42, 0x93, 0xfb, 0x48, 0x91, 0x07, 0xee, 0x92, 0x7d, 0x40, 0xb8, 0x3e, + 0x63, 0xc9, 0x05, 0x7d, 0x86, 0xb9, 0xe2, 0x73, 0x01, 0x51, 0xfc, 0xd9, 0x4e, 0x4e, 0x6f, 0xb8, + 0xe1, 0x0b, 0x16, 0x58, 0x63, 0x87, 0xae, 0xc2, 0x98, 0x78, 0x45, 0x45, 0x98, 0x29, 0xca, 0xc6, + 0x11, 0x7b, 0x0c, 0xeb, 0xc8, 0xfd, 0x34, 0x00, 0x9b, 0x85, 0xd1, 0x06, 0x3c, 0xaa, 0xbd, 
0x12, + 0x96, 0xe1, 0x6e, 0xc7, 0x25, 0xcf, 0x63, 0x7b, 0xbb, 0x73, 0x8f, 0xae, 0x1d, 0x44, 0x88, 0x0f, + 0xe6, 0x83, 0x6e, 0xc0, 0x49, 0xa7, 0x19, 0x7b, 0xdb, 0xa4, 0x46, 0x1c, 0xb7, 0xe5, 0xf9, 0xc4, + 0x4c, 0x93, 0xf0, 0xf0, 0xde, 0xee, 0xdc, 0xc9, 0x85, 0x2c, 0x02, 0x9c, 0x5d, 0x0e, 0x7d, 0x12, + 0xaa, 0xae, 0x1f, 0x89, 0x3e, 0x18, 0x34, 0x1e, 0xc5, 0xab, 0xd6, 0xae, 0x37, 0xd4, 0xf7, 0x27, + 0x7f, 0x70, 0x52, 0x00, 0xbd, 0x0b, 0xa3, 0x7a, 0xf8, 0x93, 0x78, 0x8c, 0xf1, 0xc5, 0x42, 0xe7, + 0x67, 0x23, 0x66, 0x88, 0x5b, 0xf0, 0x94, 0x5b, 0xab, 0x11, 0x4e, 0x64, 0x54, 0x81, 0x7e, 0x1e, + 0x50, 0x44, 0xc2, 0x6d, 0xaf, 0x49, 0x16, 0x9a, 0x2c, 0xbb, 0x2f, 0xb3, 0xf1, 0x0c, 0x1b, 0xf1, + 0x1d, 0xa8, 0xd1, 0x45, 0x81, 0x33, 0x4a, 0xa1, 0xcb, 0x54, 0xf2, 0xe8, 0x50, 0xe1, 0x85, 0x2c, + 0x15, 0xc3, 0x99, 0x1a, 0x69, 0x87, 0xa4, 0xe9, 0xc4, 0xc4, 0x35, 0x39, 0xe2, 0x54, 0x39, 0xba, + 0x2f, 0xa9, 0x07, 0x1c, 0xc0, 0xf4, 0x9d, 0xed, 0x7e, 0xc4, 0x81, 0x9e, 0xb3, 0x36, 0x83, 0x28, + 0xbe, 0x4e, 0xe2, 0x7b, 0x41, 0x78, 0x57, 0x64, 0x44, 0x4b, 0x52, 0x25, 0x26, 0x28, 0xac, 0xd3, + 0x51, 0x1d, 0x8a, 0x5d, 0xfd, 0xad, 0xd6, 0xd8, 0xbd, 0xca, 0x70, 0xb2, 0x76, 0x2e, 0x73, 0x30, + 0x96, 0x78, 0x49, 0xba, 0x5a, 0x5f, 0x62, 0x77, 0x24, 0x29, 0xd2, 0xd5, 0xfa, 0x12, 0x96, 0x78, + 0x14, 0x74, 0x3f, 0x3d, 0x38, 0x5e, 0xe4, 0xbe, 0xaa, 0x5b, 0x92, 0x17, 0x7c, 0x7d, 0xf0, 0x3e, + 0x4c, 0xaa, 0xe7, 0x0f, 0x79, 0xd2, 0xb8, 0x68, 0x66, 0x82, 0x4d, 0x9c, 0xc3, 0xe4, 0x9e, 0x53, + 0x76, 0xbd, 0xd5, 0x14, 0x4f, 0xdc, 0x55, 0x8b, 0x91, 0x9c, 0x63, 0x32, 0xf7, 0x51, 0x8e, 0xf3, + 0x50, 0x8d, 0x3a, 0x77, 0xdc, 0x60, 0xcb, 0xf1, 0x7c, 0x76, 0x91, 0xa1, 0x29, 0x31, 0x0d, 0x89, + 0xc0, 0x09, 0x0d, 0xaa, 0xc3, 0xb0, 0x23, 0x8e, 0x70, 0xe2, 0xc2, 0x21, 0x27, 0x0a, 0x5f, 0x1e, + 0xf8, 0xb8, 0x75, 0x55, 0xfe, 0xc3, 0x8a, 0x0b, 0x7a, 0x19, 0xc6, 0x44, 0x10, 0x99, 0x70, 0xf6, + 0x3c, 0x61, 0x06, 0x1c, 0x34, 0x74, 0x24, 0x36, 0x69, 0xd1, 0x06, 0x8c, 0x53, 0x2e, 0x89, 0x00, + 0x9c, 0x99, 0xee, 0x4f, 0x86, 
0x6a, 0xe9, 0xcf, 0x75, 0x36, 0x38, 0xc5, 0x16, 0xb9, 0xf0, 0x88, + 0xd3, 0x89, 0x83, 0x2d, 0xba, 0x12, 0xcc, 0x75, 0xb2, 0x16, 0xdc, 0x25, 0x3e, 0xbb, 0x65, 0x18, + 0x5e, 0x3c, 0xb3, 0xb7, 0x3b, 0xf7, 0xc8, 0xc2, 0x01, 0x74, 0xf8, 0x40, 0x2e, 0xe8, 0x2d, 0x18, + 0x89, 0x83, 0x96, 0xf0, 0xe1, 0x8e, 0x66, 0x4e, 0x15, 0x49, 0x42, 0xb4, 0xa6, 0x0a, 0xe8, 0x66, + 0x0c, 0xc5, 0x04, 0xeb, 0x1c, 0xd1, 0x9b, 0x7c, 0x55, 0xb2, 0x84, 0x99, 0x24, 0x9a, 0x79, 0xa8, + 0x48, 0x67, 0xa9, 0x0c, 0x9b, 0xe6, 0xf2, 0x15, 0x3c, 0xb0, 0xce, 0x70, 0xf6, 0xe7, 0x60, 0xaa, + 0x4b, 0xe4, 0xf5, 0xe5, 0xdc, 0xfa, 0x1f, 0x07, 0xa0, 0xaa, 0x2c, 0x86, 0xe8, 0xbc, 0x69, 0x1c, + 0x7e, 0x38, 0x6d, 0x1c, 0x1e, 0xa6, 0x0a, 0x9a, 0x6e, 0x0f, 0x7e, 0x33, 0xe3, 0x41, 0xfd, 0xa7, + 0x73, 0xd7, 0x78, 0xf1, 0xc8, 0x36, 0xed, 0x88, 0x57, 0x2e, 0x6c, 0x6f, 0xae, 0x1c, 0x78, 0x6a, + 0x2c, 0xf8, 0x48, 0x24, 0x3d, 0x1f, 0xb6, 0x03, 0x77, 0xb5, 0x9e, 0x7e, 0x03, 0xad, 0x4e, 0x81, + 0x98, 0xe3, 0x98, 0x5e, 0x4f, 0xf7, 0x6c, 0xa6, 0xd7, 0x0f, 0x1d, 0x52, 0xaf, 0x97, 0x0c, 0x70, + 0xc2, 0x0b, 0x6d, 0xc3, 0x54, 0xd3, 0x7c, 0xd2, 0x4e, 0xc5, 0xab, 0x3d, 0xdb, 0xc7, 0x93, 0x72, + 0x1d, 0xed, 0x45, 0x9a, 0xa5, 0x34, 0x3f, 0xdc, 0x5d, 0x05, 0x7a, 0x19, 0x86, 0xdf, 0x0d, 0x22, + 0x76, 0x6d, 0x21, 0x36, 0x2e, 0x19, 0x17, 0x34, 0xfc, 0xea, 0x8d, 0x06, 0x83, 0xef, 0xef, 0xce, + 0x8d, 0xd4, 0x03, 0x57, 0xfe, 0xc5, 0xaa, 0x00, 0xfa, 0xac, 0x05, 0x27, 0x8d, 0x75, 0xac, 0x5a, + 0x0e, 0x87, 0x69, 0xf9, 0xa3, 0xa2, 0xe6, 0x93, 0xab, 0x59, 0x3c, 0x71, 0x76, 0x55, 0xf6, 0x77, + 0xb9, 0x89, 0x54, 0x18, 0x4d, 0x48, 0xd4, 0x69, 0x1d, 0xc7, 0xeb, 0x10, 0x37, 0x0c, 0x7b, 0xce, + 0x03, 0x30, 0xd2, 0xff, 0x7b, 0x8b, 0x19, 0xe9, 0xd7, 0xc8, 0x56, 0xbb, 0xe5, 0xc4, 0xc7, 0xe1, + 0xfb, 0xfc, 0x19, 0x18, 0x8e, 0x45, 0x6d, 0xc5, 0x9e, 0xb6, 0xd0, 0x9a, 0xc7, 0x2e, 0x2f, 0xd4, + 0xc6, 0x27, 0xa1, 0x58, 0x31, 0xb4, 0xff, 0x15, 0x1f, 0x15, 0x89, 0x39, 0x06, 0x4b, 0xc4, 0x75, + 0xd3, 0x12, 0xf1, 0x54, 0xe1, 0x6f, 0xe9, 0x61, 0x91, 0xf8, 0x8e, 
0xf9, 0x05, 0xec, 0x7c, 0xf2, + 0x93, 0x73, 0x8b, 0x64, 0xff, 0xba, 0x05, 0xd3, 0x59, 0xce, 0x08, 0x54, 0x81, 0xe1, 0xa7, 0x23, + 0x75, 0xbf, 0xa6, 0x7a, 0xf5, 0x96, 0x80, 0x63, 0x45, 0x51, 0x38, 0xd7, 0x7c, 0x7f, 0x29, 0xb4, + 0x6e, 0x80, 0xf9, 0x38, 0x22, 0x7a, 0x85, 0x87, 0x3a, 0x58, 0xea, 0xf5, 0xc2, 0xfe, 0xc2, 0x1c, + 0xec, 0x6f, 0x94, 0x60, 0x9a, 0x1b, 0xb9, 0x17, 0xb6, 0x03, 0xcf, 0xad, 0x07, 0xae, 0x08, 0xfc, + 0x70, 0x61, 0xb4, 0xad, 0x1d, 0x6e, 0x8b, 0xa5, 0xe4, 0xd1, 0x8f, 0xc3, 0xc9, 0x81, 0x42, 0x87, + 0x62, 0x83, 0x2b, 0xad, 0x85, 0x6c, 0x7b, 0x4d, 0x65, 0x33, 0x2d, 0xf5, 0xbd, 0x33, 0xa8, 0x5a, + 0x96, 0x35, 0x3e, 0xd8, 0xe0, 0x7a, 0x04, 0x4f, 0xc4, 0xd8, 0x7f, 0xdf, 0x82, 0x87, 0x7a, 0xa4, + 0xed, 0xa1, 0xd5, 0xdd, 0x63, 0x17, 0x0b, 0xe2, 0xf5, 0x4d, 0x55, 0x1d, 0xbf, 0x6e, 0xc0, 0x02, + 0x8b, 0xee, 0x00, 0xf0, 0xeb, 0x02, 0xaa, 0x4b, 0xa7, 0xef, 0xb2, 0x0b, 0x26, 0xc7, 0xd0, 0xf2, + 0x26, 0x48, 0x4e, 0x58, 0xe3, 0x6a, 0x7f, 0xad, 0x0c, 0x03, 0xfc, 0x91, 0xf7, 0x3a, 0x0c, 0x6d, + 0xf2, 0x7c, 0xc6, 0xfd, 0xa5, 0x53, 0x4e, 0x0e, 0x2f, 0x1c, 0x80, 0x25, 0x1b, 0x74, 0x0d, 0x4e, + 0x88, 0xd0, 0xa3, 0x1a, 0x69, 0x39, 0x3b, 0xf2, 0x34, 0xcc, 0xdf, 0x0d, 0x91, 0x09, 0xee, 0x4f, + 0xac, 0x76, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x95, 0xae, 0xf4, 0x83, 0x3c, 0x4f, 0xb4, 0xd2, 0x84, + 0x73, 0x52, 0x10, 0xbe, 0x0c, 0x63, 0xed, 0xae, 0x73, 0xbf, 0xf6, 0x96, 0xb6, 0x79, 0xd6, 0x37, + 0x69, 0x99, 0xef, 0x42, 0x87, 0xf9, 0x6c, 0xac, 0x6d, 0x86, 0x24, 0xda, 0x0c, 0x5a, 0xae, 0x78, + 0x06, 0x36, 0xf1, 0x5d, 0x48, 0xe1, 0x71, 0x57, 0x09, 0xca, 0x65, 0xdd, 0xf1, 0x5a, 0x9d, 0x90, + 0x24, 0x5c, 0x06, 0x4d, 0x2e, 0x2b, 0x29, 0x3c, 0xee, 0x2a, 0x41, 0xe7, 0xd6, 0x49, 0xf1, 0x72, + 0xa8, 0x0c, 0x52, 0x17, 0x22, 0xe8, 0xd3, 0x30, 0x24, 0x03, 0x08, 0x0a, 0xe5, 0x52, 0x11, 0x8e, + 0x09, 0xea, 0x15, 0x52, 0xed, 0x1d, 0x39, 0x11, 0x3a, 0x20, 0xf9, 0x1d, 0xe6, 0x85, 0xca, 0x3f, + 0xb7, 0xe0, 0x44, 0x86, 0x23, 0x1c, 0x17, 0x69, 0x1b, 0x5e, 0x14, 0xab, 0x57, 0x2c, 0x34, 0x91, + 0xc6, 
0xe1, 0x58, 0x51, 0xd0, 0xd5, 0xc2, 0x85, 0x66, 0x5a, 0x50, 0x0a, 0x17, 0x13, 0x81, 0xed, + 0x4f, 0x50, 0xa2, 0x33, 0x50, 0xe9, 0x44, 0x24, 0x94, 0x0f, 0x3a, 0x4a, 0x39, 0xcf, 0xec, 0x8c, + 0x0c, 0x43, 0xd5, 0xd6, 0x0d, 0x65, 0xe2, 0xd3, 0xd4, 0x56, 0x6e, 0xe4, 0xe3, 0x38, 0xfb, 0xcb, + 0x65, 0x98, 0x48, 0x39, 0xc4, 0xd2, 0x86, 0x6c, 0x05, 0xbe, 0x17, 0x07, 0x2a, 0xbf, 0x1d, 0x7f, + 0x43, 0x8e, 0xb4, 0x37, 0xaf, 0x09, 0x38, 0x56, 0x14, 0xe8, 0x49, 0xf9, 0x42, 0x70, 0xfa, 0x75, + 0x8e, 0xc5, 0x9a, 0xf1, 0x48, 0x70, 0xd1, 0x97, 0x75, 0x1e, 0x87, 0x4a, 0x3b, 0x50, 0x0f, 0xbe, + 0xab, 0xf1, 0xc4, 0x8b, 0xb5, 0x7a, 0x10, 0xb4, 0x30, 0x43, 0xa2, 0x27, 0xc4, 0xd7, 0xa7, 0x6e, + 0x46, 0xb0, 0xe3, 0x06, 0x91, 0xd6, 0x05, 0x4f, 0xc1, 0xd0, 0x5d, 0xb2, 0x13, 0x7a, 0xfe, 0x46, + 0xfa, 0x5e, 0xe8, 0x0a, 0x07, 0x63, 0x89, 0x37, 0x93, 0xd5, 0x0f, 0x1d, 0xf1, 0xeb, 0x39, 0xc3, + 0xb9, 0xfb, 0xe0, 0x37, 0x2d, 0x98, 0x60, 0xd9, 0x67, 0x45, 0x8a, 0x04, 0x2f, 0xf0, 0x8f, 0x41, + 0xc7, 0x78, 0x1c, 0x06, 0x42, 0x5a, 0x69, 0xfa, 0xf9, 0x0b, 0xd6, 0x12, 0xcc, 0x71, 0xe8, 0x11, + 0xa8, 0xb0, 0x26, 0xd0, 0x61, 0x1c, 0xe5, 0x49, 0xee, 0x6b, 0x4e, 0xec, 0x60, 0x06, 0x65, 0x31, + 0x68, 0x98, 0xb4, 0x5b, 0x1e, 0x6f, 0x74, 0x62, 0xce, 0xfd, 0xa0, 0xc5, 0xa0, 0x65, 0x36, 0xf2, + 0x41, 0xc5, 0xa0, 0x65, 0x33, 0x3f, 0x58, 0xcf, 0xff, 0xef, 0x25, 0x38, 0x9d, 0x59, 0x2e, 0xb9, + 0x61, 0x5e, 0x31, 0x6e, 0x98, 0x2f, 0xa6, 0x6e, 0x98, 0xed, 0x83, 0x4b, 0x3f, 0x98, 0x3b, 0xe7, + 0xec, 0xab, 0xe0, 0xf2, 0x31, 0x5e, 0x05, 0x57, 0x8a, 0xaa, 0x38, 0x03, 0x39, 0x2a, 0xce, 0x1f, + 0x59, 0xf0, 0x70, 0x66, 0x97, 0x7d, 0xe0, 0x82, 0xfe, 0x32, 0x5b, 0xd9, 0xe3, 0x74, 0xf2, 0x6b, + 0xe5, 0x1e, 0x5f, 0xc5, 0xce, 0x29, 0x67, 0xa9, 0x14, 0x62, 0xc8, 0x48, 0x28, 0x6f, 0xa3, 0x5c, + 0x02, 0x71, 0x18, 0x56, 0x58, 0x14, 0x69, 0x41, 0x73, 0xbc, 0x91, 0xcb, 0x87, 0x5c, 0x50, 0xf3, + 0xa6, 0x1d, 0x5e, 0xcf, 0xfb, 0x90, 0x0e, 0xa5, 0xbb, 0xad, 0x9d, 0x3c, 0xcb, 0x87, 0x39, 0x79, + 0x8e, 0x66, 0x9f, 0x3a, 0xd1, 0x02, 0x4c, 
0x6c, 0x79, 0x3e, 0x7b, 0x74, 0xd7, 0xd4, 0x9e, 0x54, + 0xe4, 0xf2, 0x35, 0x13, 0x8d, 0xd3, 0xf4, 0xb3, 0x2f, 0xc3, 0xd8, 0xe1, 0xad, 0x6b, 0x3f, 0x2a, + 0xc3, 0x87, 0x0f, 0x10, 0x0a, 0x7c, 0x77, 0x30, 0xc6, 0x45, 0xdb, 0x1d, 0xba, 0xc6, 0xa6, 0x0e, + 0xd3, 0xeb, 0x9d, 0x56, 0x6b, 0x87, 0xf9, 0x67, 0x11, 0x57, 0x52, 0x08, 0xa5, 0x46, 0x25, 0xa3, + 0x5e, 0xc9, 0xa0, 0xc1, 0x99, 0x25, 0xd1, 0xcf, 0x03, 0x0a, 0xee, 0xb0, 0xb4, 0xc8, 0x6e, 0x92, + 0xd7, 0x82, 0x0d, 0x41, 0x39, 0x59, 0xaa, 0x37, 0xba, 0x28, 0x70, 0x46, 0x29, 0xaa, 0xa7, 0xd2, + 0x7d, 0x6c, 0x47, 0x35, 0x2b, 0xa5, 0xa7, 0x62, 0x1d, 0x89, 0x4d, 0x5a, 0x74, 0x09, 0xa6, 0x9c, + 0x6d, 0xc7, 0xe3, 0x69, 0xce, 0x24, 0x03, 0xae, 0xa8, 0x2a, 0xfb, 0xd5, 0x42, 0x9a, 0x00, 0x77, + 0x97, 0x41, 0x6d, 0xc3, 0x20, 0xc9, 0x5f, 0x66, 0xf8, 0xe4, 0x21, 0x66, 0x70, 0x61, 0x13, 0xa5, + 0xfd, 0xa7, 0x16, 0xdd, 0xfa, 0x32, 0xde, 0x67, 0xa5, 0x3d, 0xa2, 0x0c, 0x6c, 0x5a, 0x10, 0xa0, + 0xea, 0x91, 0x25, 0x1d, 0x89, 0x4d, 0x5a, 0x3e, 0x35, 0xa2, 0xc4, 0x5d, 0xdc, 0xd0, 0x36, 0x45, + 0xfc, 0xac, 0xa2, 0xa0, 0x1a, 0xb4, 0xeb, 0x6d, 0x7b, 0x51, 0x10, 0x8a, 0x05, 0xd4, 0xef, 0x0b, + 0xe8, 0x4a, 0x5e, 0xd6, 0x38, 0x1b, 0x2c, 0xf9, 0xd9, 0x5f, 0x29, 0xc1, 0x98, 0xac, 0xf1, 0xd5, + 0x4e, 0x10, 0x3b, 0xc7, 0xb0, 0xa5, 0xbf, 0x6a, 0x6c, 0xe9, 0xe7, 0x8b, 0x85, 0x13, 0xb3, 0xc6, + 0xf5, 0xdc, 0xca, 0x3f, 0x9d, 0xda, 0xca, 0x2f, 0xf4, 0xc3, 0xf4, 0xe0, 0x2d, 0xfc, 0xdf, 0x58, + 0x30, 0x65, 0xd0, 0x1f, 0xc3, 0x4e, 0x52, 0x37, 0x77, 0x92, 0x67, 0xfa, 0xf8, 0x9a, 0x1e, 0x3b, + 0xc8, 0xd7, 0x4b, 0xa9, 0xaf, 0x60, 0x3b, 0xc7, 0x2f, 0x40, 0x65, 0xd3, 0x09, 0xdd, 0x62, 0x39, + 0x3f, 0xbb, 0x8a, 0xcf, 0x5f, 0x76, 0x42, 0x97, 0xcb, 0xff, 0x73, 0xea, 0xf5, 0x38, 0x27, 0x74, + 0x73, 0xa3, 0x28, 0x58, 0xa5, 0xe8, 0x25, 0x18, 0x8c, 0x9a, 0x41, 0x5b, 0xf9, 0x99, 0x9e, 0xe1, + 0x2f, 0xcb, 0x51, 0xc8, 0xfe, 0xee, 0x1c, 0x32, 0xab, 0xa3, 0x60, 0x2c, 0xe8, 0x67, 0x37, 0xa0, + 0xaa, 0xaa, 0x3e, 0x52, 0x4f, 0xfb, 0xff, 0x5a, 0x86, 0x13, 0x19, 0x73, 0x05, 
0xfd, 0xa2, 0xd1, + 0x6f, 0x2f, 0xf7, 0x3d, 0xd9, 0xde, 0x67, 0xcf, 0xfd, 0x22, 0x3b, 0x29, 0xb9, 0x62, 0x76, 0x1c, + 0xa2, 0xfa, 0x9b, 0x11, 0x49, 0x57, 0x4f, 0x41, 0xf9, 0xd5, 0xd3, 0x6a, 0x8f, 0xad, 0xfb, 0x69, + 0x45, 0xaa, 0xa5, 0x47, 0x3a, 0xce, 0x5f, 0xa8, 0xc0, 0x74, 0x56, 0xde, 0x02, 0xf4, 0x2b, 0x56, + 0xea, 0x85, 0x91, 0x57, 0xfa, 0x4f, 0x7e, 0xc0, 0x9f, 0x1d, 0x11, 0x59, 0x85, 0xe6, 0xcd, 0x37, + 0x47, 0x72, 0x7b, 0x5c, 0xd4, 0xce, 0xe2, 0x9f, 0x42, 0xfe, 0x5a, 0x8c, 0x94, 0x0a, 0x9f, 0x3a, + 0x44, 0x53, 0xc4, 0x83, 0x33, 0x51, 0x2a, 0xfe, 0x49, 0x82, 0xf3, 0xe3, 0x9f, 0x64, 0x1b, 0x66, + 0x3d, 0x18, 0xd1, 0xbe, 0xeb, 0x48, 0xa7, 0xc1, 0x5d, 0xba, 0x45, 0x69, 0xed, 0x3e, 0xd2, 0xa9, + 0xf0, 0x77, 0x2c, 0x48, 0x39, 0x85, 0x29, 0xb3, 0x8c, 0xd5, 0xd3, 0x2c, 0x73, 0x06, 0x2a, 0x61, + 0xd0, 0x22, 0xe9, 0x47, 0x27, 0x70, 0xd0, 0x22, 0x98, 0x61, 0xd4, 0x83, 0xd2, 0xe5, 0x5e, 0x0f, + 0x4a, 0xd3, 0x73, 0x7a, 0x8b, 0x6c, 0x13, 0x69, 0x24, 0x51, 0x62, 0xfc, 0x2a, 0x05, 0x62, 0x8e, + 0xb3, 0x7f, 0xa7, 0x02, 0x27, 0x32, 0x62, 0x01, 0xe9, 0x09, 0x69, 0xc3, 0x89, 0xc9, 0x3d, 0x67, + 0x27, 0x9d, 0xfc, 0xf6, 0x12, 0x07, 0x63, 0x89, 0x67, 0xce, 0xac, 0x3c, 0x81, 0x5e, 0xca, 0x74, + 0x25, 0xf2, 0xe6, 0x09, 0xec, 0xd1, 0x3f, 0x3d, 0x7c, 0x11, 0x20, 0x8a, 0x5a, 0xcb, 0x3e, 0xd5, + 0xf0, 0x5c, 0xe1, 0x34, 0x9b, 0xe4, 0x5d, 0x6c, 0x5c, 0x15, 0x18, 0xac, 0x51, 0xa1, 0x1a, 0x4c, + 0xb6, 0xc3, 0x20, 0xe6, 0x86, 0xc1, 0x1a, 0x77, 0xb4, 0x18, 0x30, 0xa3, 0xb5, 0xea, 0x29, 0x3c, + 0xee, 0x2a, 0x81, 0x5e, 0x80, 0x11, 0x11, 0xc1, 0x55, 0x0f, 0x82, 0x96, 0x30, 0x23, 0xa9, 0xeb, + 0xf8, 0x46, 0x82, 0xc2, 0x3a, 0x9d, 0x56, 0x8c, 0x59, 0x1b, 0x87, 0x32, 0x8b, 0x71, 0x8b, 0xa3, + 0x46, 0x97, 0xca, 0x6e, 0x32, 0x5c, 0x28, 0xbb, 0x49, 0x62, 0x58, 0xab, 0x16, 0xbe, 0x88, 0x81, + 0x5c, 0x03, 0xd4, 0x1f, 0x96, 0x61, 0x90, 0x0f, 0xc5, 0x31, 0x68, 0x79, 0x75, 0x61, 0x52, 0x2a, + 0x94, 0x49, 0x82, 0xb7, 0x6a, 0xbe, 0xe6, 0xc4, 0x0e, 0x17, 0x4d, 0x6a, 0x85, 0x24, 0x66, 0x28, + 0x34, 0x6f, 0xac, 
0xa1, 0xd9, 0x94, 0xa5, 0x04, 0x38, 0x0f, 0x6d, 0x45, 0x6d, 0x02, 0x44, 0xec, + 0xf9, 0x5b, 0xca, 0x43, 0x64, 0xe6, 0x7d, 0xbe, 0x50, 0x3b, 0x1a, 0xaa, 0x18, 0x6f, 0x4d, 0x32, + 0x2d, 0x15, 0x02, 0x6b, 0xbc, 0x67, 0x5f, 0x84, 0xaa, 0x22, 0xce, 0x3b, 0x42, 0x8e, 0xea, 0xa2, + 0xed, 0x67, 0x61, 0x22, 0x55, 0x57, 0x5f, 0x27, 0xd0, 0xdf, 0xb3, 0x60, 0x82, 0x37, 0x79, 0xd9, + 0xdf, 0x16, 0xa2, 0xe0, 0x73, 0x16, 0x4c, 0xb7, 0x32, 0x56, 0xa2, 0x18, 0xe6, 0xc3, 0xac, 0x61, + 0x75, 0xf8, 0xcc, 0xc2, 0xe2, 0xcc, 0xda, 0xd0, 0x59, 0x18, 0xe6, 0xaf, 0x79, 0x3b, 0x2d, 0xe1, + 0xa1, 0x3d, 0xca, 0x73, 0x92, 0x73, 0x18, 0x56, 0x58, 0xfb, 0xc7, 0x16, 0x4c, 0xf1, 0x8f, 0xb8, + 0x42, 0x76, 0xd4, 0xf1, 0xea, 0x03, 0xf2, 0x19, 0x22, 0xfb, 0x7a, 0xa9, 0x47, 0xf6, 0x75, 0xfd, + 0x2b, 0xcb, 0x07, 0x7e, 0xe5, 0x37, 0x2c, 0x10, 0x33, 0xf4, 0x18, 0xce, 0x0f, 0xab, 0xe6, 0xf9, + 0xe1, 0x23, 0x45, 0x26, 0x7d, 0x8f, 0x83, 0xc3, 0xaf, 0x96, 0x60, 0x92, 0x13, 0x24, 0x37, 0x32, + 0x1f, 0x94, 0xc1, 0xe9, 0xef, 0x55, 0x20, 0xf5, 0x26, 0x6c, 0xf6, 0x97, 0x1a, 0x63, 0x59, 0x39, + 0x70, 0x2c, 0xff, 0xa7, 0x05, 0x88, 0xf7, 0x49, 0xfa, 0x29, 0x74, 0xbe, 0xbb, 0x69, 0xe6, 0x80, + 0x44, 0x72, 0x28, 0x0c, 0xd6, 0xa8, 0x1e, 0xf0, 0x27, 0xa4, 0xee, 0xc3, 0xca, 0xf9, 0xf7, 0x61, + 0x7d, 0x7c, 0xf5, 0x77, 0xcb, 0x90, 0x76, 0xd5, 0x44, 0x6f, 0xc3, 0x68, 0xd3, 0x69, 0x3b, 0x77, + 0xbc, 0x96, 0x17, 0x7b, 0x24, 0x2a, 0x76, 0xe1, 0xbe, 0xa4, 0x95, 0x10, 0xd7, 0x50, 0x1a, 0x04, + 0x1b, 0x1c, 0xd1, 0x3c, 0x40, 0x3b, 0xf4, 0xb6, 0xbd, 0x16, 0xd9, 0x60, 0x27, 0x1e, 0x16, 0xeb, + 0xc1, 0xef, 0x8e, 0x25, 0x14, 0x6b, 0x14, 0x19, 0xb1, 0x01, 0xe5, 0xe3, 0x88, 0x0d, 0xa8, 0xf4, + 0x19, 0x1b, 0x30, 0x50, 0x28, 0x36, 0x00, 0xc3, 0x29, 0xb9, 0x79, 0xd3, 0xff, 0x2b, 0x5e, 0x8b, + 0x08, 0xdd, 0x8d, 0xc7, 0x82, 0xcc, 0xee, 0xed, 0xce, 0x9d, 0xc2, 0x99, 0x14, 0xb8, 0x47, 0x49, + 0xbb, 0x03, 0x27, 0x1a, 0x24, 0x94, 0xcf, 0xd8, 0xa9, 0xb5, 0xf4, 0x26, 0x54, 0xc3, 0xd4, 0x32, + 0xee, 0x33, 0xe0, 0x5f, 0xcb, 0xf1, 0x26, 0x97, 0x6d, 
0xc2, 0xd2, 0xfe, 0xeb, 0x25, 0x18, 0x12, + 0x4e, 0x9a, 0xc7, 0xa0, 0x7c, 0x5c, 0x31, 0x4c, 0x4c, 0x4f, 0xe5, 0xc9, 0x3f, 0xd6, 0xac, 0x9e, + 0xc6, 0xa5, 0x46, 0xca, 0xb8, 0xf4, 0x4c, 0x31, 0x76, 0x07, 0x9b, 0x95, 0x7e, 0xab, 0x0c, 0xe3, + 0xa6, 0xd3, 0xea, 0x31, 0x74, 0xcb, 0x6b, 0x30, 0x14, 0x09, 0xff, 0xe9, 0x52, 0x11, 0x9f, 0xbd, + 0xf4, 0x10, 0x27, 0x37, 0xf1, 0xc2, 0x63, 0x5a, 0xb2, 0xcb, 0x74, 0xd1, 0x2e, 0x1f, 0x8b, 0x8b, + 0x76, 0x9e, 0x2f, 0x71, 0xe5, 0x41, 0xf8, 0x12, 0xdb, 0xdf, 0x63, 0x22, 0x5f, 0x87, 0x1f, 0xc3, + 0x36, 0xfe, 0xaa, 0xb9, 0x39, 0x9c, 0x2b, 0x34, 0xef, 0x44, 0xf3, 0x7a, 0x6c, 0xe7, 0xdf, 0xb2, + 0x60, 0x44, 0x10, 0x1e, 0xc3, 0x07, 0xfc, 0xbc, 0xf9, 0x01, 0x4f, 0x14, 0xfa, 0x80, 0x1e, 0x2d, + 0xff, 0x4a, 0x49, 0xb5, 0xbc, 0x1e, 0x84, 0x71, 0xa1, 0x4c, 0xe8, 0xc3, 0xf4, 0xe8, 0x17, 0x34, + 0x83, 0x96, 0x50, 0xe0, 0x1e, 0x49, 0x42, 0xff, 0x38, 0x7c, 0x5f, 0xfb, 0x8d, 0x15, 0x35, 0x8b, + 0x4c, 0x0b, 0xc2, 0x58, 0x6c, 0xa0, 0x49, 0x64, 0x5a, 0x10, 0xc6, 0x98, 0x61, 0x90, 0x0b, 0x10, + 0x3b, 0xe1, 0x06, 0x89, 0x29, 0x4c, 0x44, 0xcd, 0xf6, 0x5e, 0xad, 0x9d, 0xd8, 0x6b, 0xcd, 0x7b, + 0x7e, 0x1c, 0xc5, 0xe1, 0xfc, 0xaa, 0x1f, 0xdf, 0x08, 0xb9, 0xd2, 0xaf, 0xc5, 0xf2, 0x29, 0x5e, + 0x58, 0xe3, 0x2b, 0x83, 0x44, 0x58, 0x1d, 0x03, 0xe6, 0x0d, 0xd2, 0x75, 0x01, 0xc7, 0x8a, 0xc2, + 0x7e, 0x91, 0x49, 0x76, 0xd6, 0x41, 0xfd, 0x85, 0xd9, 0x7d, 0x61, 0x48, 0x75, 0x2d, 0x33, 0x0b, + 0x5f, 0xd7, 0x83, 0xf9, 0x8a, 0x8a, 0x4f, 0xda, 0x04, 0xdd, 0x8f, 0x3a, 0x89, 0xfd, 0x43, 0xa4, + 0xeb, 0xda, 0xf1, 0xc5, 0xc2, 0x12, 0xb9, 0x8f, 0x8b, 0x46, 0x96, 0x92, 0x91, 0xe5, 0xa1, 0x5b, + 0xad, 0xa7, 0xf3, 0xd7, 0x2f, 0x49, 0x04, 0x4e, 0x68, 0xd0, 0x79, 0x71, 0xa0, 0xe4, 0x16, 0x97, + 0x0f, 0xa7, 0x0e, 0x94, 0xb2, 0x4b, 0xb4, 0x13, 0xe5, 0x05, 0x18, 0x51, 0x4f, 0x02, 0xd5, 0xf9, + 0x63, 0x2c, 0x55, 0xae, 0x5f, 0x2d, 0x27, 0x60, 0xac, 0xd3, 0xa0, 0x35, 0x98, 0x88, 0xf8, 0x7b, + 0x45, 0x32, 0x5a, 0x43, 0x18, 0x0e, 0x9e, 0x96, 0x97, 0x94, 0x0d, 0x13, 0xbd, 0xcf, 0x40, 
0x7c, + 0x29, 0xcb, 0xf8, 0x8e, 0x34, 0x0b, 0xf4, 0x0a, 0x8c, 0xb7, 0xf4, 0x37, 0x5c, 0xeb, 0xc2, 0xae, + 0xa0, 0xdc, 0xce, 0x8c, 0x17, 0x5e, 0xeb, 0x38, 0x45, 0x8d, 0x5e, 0x83, 0x19, 0x1d, 0x22, 0x92, + 0x0b, 0x39, 0xfe, 0x06, 0x89, 0xc4, 0xdb, 0x26, 0x8f, 0xec, 0xed, 0xce, 0xcd, 0x5c, 0xed, 0x41, + 0x83, 0x7b, 0x96, 0x46, 0x2f, 0xc1, 0xa8, 0xfc, 0x7c, 0x2d, 0xb6, 0x29, 0x71, 0x78, 0xd4, 0x70, + 0xd8, 0xa0, 0x44, 0xf7, 0xe0, 0xa4, 0xfc, 0xbf, 0x16, 0x3a, 0xeb, 0xeb, 0x5e, 0x53, 0x04, 0x99, + 0x8d, 0x30, 0x16, 0x0b, 0xd2, 0x5f, 0x7c, 0x39, 0x8b, 0x68, 0x7f, 0x77, 0xee, 0x8c, 0xe8, 0xb5, + 0x4c, 0x3c, 0x1b, 0xc4, 0x6c, 0xfe, 0xe8, 0x1a, 0x9c, 0xd8, 0x24, 0x4e, 0x2b, 0xde, 0x5c, 0xda, + 0x24, 0xcd, 0xbb, 0x72, 0x61, 0xb1, 0x88, 0x29, 0xcd, 0x25, 0xf0, 0x72, 0x37, 0x09, 0xce, 0x2a, + 0xf7, 0xfe, 0xee, 0x94, 0x7f, 0x81, 0x16, 0xd6, 0xf4, 0x07, 0xf4, 0x0e, 0x8c, 0xea, 0x7d, 0x9d, + 0x56, 0x0c, 0xf2, 0xdf, 0xf7, 0x15, 0x7a, 0x88, 0x1a, 0x01, 0x1d, 0x87, 0x0d, 0xde, 0xf6, 0xbf, + 0x2b, 0xc1, 0x5c, 0x4e, 0xee, 0xae, 0x94, 0x35, 0xcb, 0x2a, 0x64, 0xcd, 0x5a, 0x90, 0x6f, 0xde, + 0x5c, 0x4f, 0xe5, 0x4c, 0x4f, 0xbd, 0x62, 0x93, 0x64, 0x4e, 0x4f, 0xd3, 0x17, 0xf6, 0x34, 0xd3, + 0x0d, 0x62, 0x95, 0x5c, 0x87, 0xbb, 0xd7, 0x75, 0x1b, 0xe7, 0xc0, 0x61, 0x94, 0xde, 0x9e, 0xe6, + 0x4d, 0xfb, 0x7b, 0x25, 0x38, 0xa9, 0x3a, 0xf3, 0xa7, 0xb7, 0x0b, 0xdf, 0xea, 0xee, 0xc2, 0x07, + 0x6a, 0x26, 0xb6, 0x6f, 0xc0, 0x60, 0x63, 0x27, 0x6a, 0xc6, 0xad, 0x02, 0x3b, 0xfe, 0xe3, 0xc6, + 0xba, 0x4a, 0x76, 0x23, 0xf6, 0x92, 0x9d, 0x58, 0x66, 0xf6, 0xe7, 0x2d, 0x98, 0x58, 0x5b, 0xaa, + 0x37, 0x82, 0xe6, 0x5d, 0x12, 0x2f, 0x70, 0x83, 0x06, 0x16, 0x1b, 0xbe, 0x75, 0xc8, 0x8d, 0x3c, + 0x4b, 0x45, 0x38, 0x03, 0x95, 0xcd, 0x20, 0x8a, 0xd3, 0x97, 0x02, 0x97, 0x83, 0x28, 0xc6, 0x0c, + 0x63, 0xff, 0x99, 0x05, 0x03, 0xec, 0xa1, 0xb6, 0xbc, 0x47, 0xfe, 0x8a, 0x7c, 0x17, 0x7a, 0x01, + 0x06, 0xc9, 0xfa, 0x3a, 0x69, 0xc6, 0x62, 0x7c, 0x65, 0x80, 0xcd, 0xe0, 0x32, 0x83, 0xd2, 0x1d, + 0x8d, 0x55, 0xc6, 0xff, 0x62, 
0x41, 0x8c, 0x3e, 0x03, 0xd5, 0xd8, 0xdb, 0x22, 0x0b, 0xae, 0x2b, + 0xac, 0xf0, 0xfd, 0xf9, 0x7c, 0xa9, 0x1d, 0x76, 0x4d, 0x32, 0xc1, 0x09, 0x3f, 0xfb, 0x4b, 0x25, + 0x80, 0x24, 0x7c, 0x2e, 0xef, 0x33, 0x17, 0xbb, 0xde, 0x32, 0x7c, 0x32, 0xe3, 0x2d, 0x43, 0x94, + 0x30, 0xcc, 0x78, 0xc9, 0x50, 0x75, 0x55, 0xb9, 0x50, 0x57, 0x55, 0xfa, 0xe9, 0xaa, 0x25, 0x98, + 0x4a, 0xc2, 0xff, 0xcc, 0x38, 0x6a, 0x96, 0x6f, 0x78, 0x2d, 0x8d, 0xc4, 0xdd, 0xf4, 0xf6, 0x97, + 0x2c, 0x10, 0x5e, 0xc2, 0x05, 0x26, 0xb4, 0x2b, 0xdf, 0x1d, 0x33, 0x52, 0x0b, 0x3e, 0x5d, 0xc4, + 0x81, 0x5a, 0x24, 0x14, 0x54, 0x72, 0xdf, 0x48, 0x23, 0x68, 0x70, 0xb5, 0x7f, 0xdb, 0x82, 0x11, + 0x8e, 0xbe, 0xc6, 0x0e, 0xa2, 0xf9, 0xed, 0xea, 0x2b, 0x99, 0x35, 0x7b, 0x92, 0x8b, 0x32, 0x56, + 0x49, 0x8d, 0xf5, 0x27, 0xb9, 0x24, 0x02, 0x27, 0x34, 0xe8, 0x29, 0x18, 0x8a, 0x3a, 0x77, 0x18, + 0x79, 0xca, 0x65, 0xb8, 0xc1, 0xc1, 0x58, 0xe2, 0xed, 0x7f, 0x5a, 0x82, 0xc9, 0xb4, 0xc7, 0x38, + 0xc2, 0x30, 0xc8, 0x05, 0x48, 0xfa, 0x4c, 0x73, 0x90, 0x01, 0x54, 0xf3, 0x38, 0x07, 0xfe, 0xb0, + 0x3c, 0x13, 0x41, 0x82, 0x13, 0x5a, 0x87, 0x11, 0x37, 0xb8, 0xe7, 0xdf, 0x73, 0x42, 0x77, 0xa1, + 0xbe, 0x2a, 0x46, 0x22, 0xc7, 0xc7, 0xaf, 0x96, 0x14, 0xd0, 0xfd, 0xd9, 0x99, 0x41, 0x2e, 0x41, + 0x61, 0x9d, 0x31, 0x7a, 0x93, 0x65, 0x42, 0x59, 0xf7, 0x36, 0xae, 0x39, 0xed, 0x62, 0xde, 0x2c, + 0x4b, 0x92, 0x5c, 0xab, 0x63, 0x4c, 0x24, 0x4e, 0xe1, 0x08, 0x9c, 0xb0, 0xb4, 0x7f, 0xf5, 0x24, + 0x18, 0x73, 0xc1, 0xc8, 0x38, 0x6d, 0x3d, 0xf0, 0x8c, 0xd3, 0x6f, 0xc0, 0x30, 0xd9, 0x6a, 0xc7, + 0x3b, 0x35, 0x2f, 0x2c, 0xf6, 0x7e, 0xc0, 0xb2, 0xa0, 0xee, 0xe6, 0x2e, 0x31, 0x58, 0x71, 0xec, + 0x91, 0x3f, 0xbc, 0xfc, 0x81, 0xc8, 0x1f, 0x5e, 0xf9, 0x4b, 0xc9, 0x1f, 0xfe, 0x1a, 0x0c, 0x6d, + 0x78, 0x31, 0x26, 0xed, 0x40, 0xec, 0xc6, 0x39, 0x93, 0xe7, 0x12, 0x27, 0xee, 0xce, 0x2c, 0x2b, + 0x10, 0x58, 0xb2, 0x43, 0x6b, 0x6a, 0x51, 0x0d, 0x16, 0xd1, 0x41, 0xbb, 0x0d, 0xe4, 0x99, 0xcb, + 0x4a, 0xe4, 0x0b, 0x1f, 0x7a, 0xff, 0xf9, 0xc2, 0x55, 0x96, 0xef, 
0xe1, 0x07, 0x95, 0xe5, 0xdb, + 0xc8, 0x96, 0x5e, 0x3d, 0x8a, 0x6c, 0xe9, 0x5f, 0xb2, 0xe0, 0x64, 0x3b, 0xeb, 0xad, 0x01, 0x91, + 0xaf, 0xfb, 0xe7, 0x0e, 0xf1, 0xfa, 0x82, 0x51, 0x35, 0xcb, 0xef, 0x91, 0x49, 0x86, 0xb3, 0x2b, + 0x96, 0x69, 0xd7, 0x47, 0xde, 0x7f, 0xda, 0xf5, 0xa3, 0x4e, 0xec, 0x9d, 0x24, 0x61, 0x1f, 0x3b, + 0x92, 0x24, 0xec, 0xe3, 0x0f, 0x30, 0x09, 0xbb, 0x96, 0x3e, 0x7d, 0xe2, 0xc1, 0xa6, 0x4f, 0xdf, + 0x34, 0xf7, 0x25, 0x9e, 0xad, 0xfb, 0x85, 0xc2, 0xfb, 0x92, 0x51, 0xc3, 0xc1, 0x3b, 0x13, 0x4f, + 0x24, 0x3f, 0xf5, 0x3e, 0x13, 0xc9, 0x1b, 0xe9, 0xd8, 0xd1, 0x51, 0xa4, 0x63, 0x7f, 0x5b, 0xdf, + 0x41, 0x4f, 0x14, 0xa9, 0x41, 0x6d, 0x94, 0xdd, 0x35, 0x64, 0xed, 0xa1, 0xdd, 0x09, 0xdf, 0xa7, + 0x8f, 0x3b, 0xe1, 0xfb, 0xc9, 0x23, 0x4c, 0xf8, 0x7e, 0xea, 0x58, 0x13, 0xbe, 0x3f, 0xf4, 0x01, + 0x49, 0xf8, 0x3e, 0x73, 0x5c, 0x09, 0xdf, 0x1f, 0x7e, 0xb0, 0x09, 0xdf, 0xdf, 0x86, 0x6a, 0x5b, + 0xc6, 0x5d, 0xce, 0xcc, 0x16, 0x19, 0xba, 0xcc, 0x30, 0x4d, 0x3e, 0x74, 0x0a, 0x85, 0x13, 0xa6, + 0xb4, 0x86, 0x24, 0x01, 0xfc, 0x87, 0x8b, 0xd4, 0x90, 0x69, 0xf7, 0x38, 0x20, 0xed, 0xfb, 0x17, + 0x4a, 0x70, 0xfa, 0xe0, 0xd5, 0x91, 0x18, 0x4d, 0xea, 0x89, 0x2d, 0x3b, 0x65, 0x34, 0x61, 0x9a, + 0xa7, 0x46, 0x55, 0x38, 0x9c, 0xfd, 0x12, 0x4c, 0x29, 0x3f, 0xaf, 0x96, 0xd7, 0xdc, 0xd1, 0x9e, + 0xa1, 0x52, 0xf1, 0x09, 0x8d, 0x34, 0x01, 0xee, 0x2e, 0x83, 0x16, 0x60, 0xc2, 0x00, 0xae, 0xd6, + 0xc4, 0xf9, 0x45, 0x59, 0x69, 0x1a, 0x26, 0x1a, 0xa7, 0xe9, 0xed, 0xaf, 0x5b, 0xf0, 0x50, 0x8f, + 0x0c, 0xaf, 0x85, 0x63, 0xb4, 0xdb, 0x30, 0xd1, 0x36, 0x8b, 0x16, 0x4e, 0xf9, 0x60, 0x64, 0x94, + 0x55, 0xad, 0x4e, 0x21, 0x70, 0x9a, 0xfd, 0xe2, 0xd9, 0xef, 0xff, 0xe8, 0xf4, 0x87, 0x7e, 0xf0, + 0xa3, 0xd3, 0x1f, 0xfa, 0xe1, 0x8f, 0x4e, 0x7f, 0xe8, 0x97, 0xf6, 0x4e, 0x5b, 0xdf, 0xdf, 0x3b, + 0x6d, 0xfd, 0x60, 0xef, 0xb4, 0xf5, 0xc3, 0xbd, 0xd3, 0xd6, 0x9f, 0xef, 0x9d, 0xb6, 0xbe, 0xf4, + 0xe3, 0xd3, 0x1f, 0x7a, 0xbd, 0xb4, 0x7d, 0xe1, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x1c, + 0xd0, 
0x1f, 0x95, 0xd0, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go index 6d7814a34929..f9bf023b426e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go @@ -700,7 +700,7 @@ type EmptyDirVolumeSource struct { // The default is nil which means that the limit is undefined. // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir // +optional - SizeLimit resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } // Represents a Glusterfs mount that lasts the lifetime of a pod. diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go index 9897d4710a41..4492a06379ca 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1 import ( + resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" @@ -1240,7 +1241,7 @@ func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.D func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { out.Medium = api.StorageMedium(in.Medium) - out.SizeLimit = in.SizeLimit + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } @@ -1251,7 +1252,7 @@ func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVol func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { out.Medium = StorageMedium(in.Medium) - out.SizeLimit = in.SizeLimit + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go index 14c969fdc0b7..18cd4311e879 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go @@ -1,10 +1,27 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1 import ( + resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" @@ -842,7 +859,11 @@ func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conver in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) *out = *in - out.SizeLimit = in.SizeLimit.DeepCopy() + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go index 2ace3b117efe..766463d840da 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go @@ -400,10 +400,13 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E if source.EmptyDir != nil { numVolumes++ if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { - unsetSizeLimit := resource.Quantity{} - if unsetSizeLimit.Cmp(source.EmptyDir.SizeLimit) != 0 { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) != 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes")) } + } else { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) + } } } if source.HostPath != nil { @@ -3356,6 +3359,16 @@ func ValidateNodeUpdate(node, oldNode *api.Node) 
field.ErrorList { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) } } + + // Allow controller manager updating provider ID when not set + if len(oldNode.Spec.ProviderID) == 0 { + oldNode.Spec.ProviderID = node.Spec.ProviderID + } else { + if oldNode.Spec.ProviderID != node.Spec.ProviderID { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid")) + } + } + // TODO: move reset function to its own location // Ignore metadata changes now that they have been tested oldNode.ObjectMeta = node.ObjectMeta diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go index 559cef883317..114973bf7e2a 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go @@ -2418,7 +2418,7 @@ func TestValidateVolumes(t *testing.T) { func TestAlphaLocalStorageCapacityIsolation(t *testing.T) { testCases := []api.VolumeSource{ - {EmptyDir: &api.EmptyDirVolumeSource{SizeLimit: *resource.NewQuantity(int64(5), resource.BinarySI)}}, + {EmptyDir: &api.EmptyDirVolumeSource{SizeLimit: resource.NewQuantity(int64(5), resource.BinarySI)}}, } // Enable alpha feature LocalStorageCapacityIsolation err := utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=true") @@ -7813,6 +7813,33 @@ func TestValidateNodeUpdate(t *testing.T) { }, }, }, false}, + {api.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "update-provider-id-when-not-set", + }, + }, api.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "update-provider-id-when-not-set", + }, + Spec: api.NodeSpec{ + ProviderID: "provider:///new", + }, + }, true}, + {api.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "update-provider-id-when-set", + }, + Spec: api.NodeSpec{ + ProviderID: "provider:///old", 
+ }, + }, api.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "update-provider-id-when-set", + }, + Spec: api.NodeSpec{ + ProviderID: "provider:///new", + }, + }, false}, } for i, test := range tests { test.oldNode.ObjectMeta.ResourceVersion = "1" diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go index f432de6d2e63..0aa95e1071b0 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go @@ -1,10 +1,27 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
package api import ( + resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" fields "k8s.io/apimachinery/pkg/fields" @@ -844,7 +861,11 @@ func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conve in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) *out = *in - out.SizeLimit = in.SizeLimit.DeepCopy() + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } return nil } } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go index 0648e3ae0bb8..fe99f2844845 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go index 72c417114211..74c83c56a74a 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go index 00cee6b45946..94fdc46d0628 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go index a210900969c8..65e4b91934ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.pb.go @@ -3314,89 +3314,88 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1331 bytes of a gzipped FileDescriptorProto + // 1323 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, - 0x1b, 0xce, 0x3a, 0x4e, 0x9a, 0x6f, 0x9c, 0x26, 0xfd, 0xa6, 0x55, 0xeb, 0xa6, 0xd4, 0x8e, 0x56, - 0x08, 0xb5, 0x08, 0x76, 0xa9, 0x29, 0x88, 0x0a, 0x01, 0x8a, 0xcd, 0xa1, 0x15, 0x71, 0x0f, 0xd3, - 0x50, 0x21, 0x40, 0x82, 0xc9, 0x7a, 0xea, 0x0c, 0xf1, 0x1e, 0xb4, 0x33, 0xb6, 0x48, 0xa5, 0x4a, - 0xdc, 0x70, 0x87, 0x04, 0x37, 0xfc, 0x04, 0x24, 0xfe, 0x01, 0xd7, 0x20, 0x21, 0xf5, 0xb2, 0x97, - 0xe5, 0xc6, 0xa2, 0xee, 0x1d, 0x3f, 0x21, 0x12, 0x07, 0xcd, 0x61, 0x4f, 0x5e, 0x6f, 0x1a, 0x87, - 0xb4, 0x82, 0x3b, 0x7b, 0xe6, 0x7d, 0x9f, 0xe7, 0x3d, 0x3c, 0xf3, 0xce, 0x2c, 0x78, 0x6b, 0xfb, - 0x35, 0x66, 0x51, 0xdf, 0xde, 0xee, 0x6f, 0x92, 0xd0, 0x23, 0x9c, 0x30, 0x3b, 0xd8, 0xee, 0xda, - 0x38, 0xa0, 0xcc, 0xc6, 0x7d, 0xee, 0x33, 0x07, 0xf7, 0xa8, 0xd7, 0xb5, 0x07, 0x0d, 0xdc, 0x0b, - 0xb6, 0xf0, 0x05, 0xbb, 0x4b, 0x3c, 0x12, 0x62, 0x4e, 0x3a, 0x56, 0x10, 0xfa, 0xdc, 0x87, 0xb6, - 0x02, 0xb0, 
0x12, 0x00, 0x2b, 0xd8, 0xee, 0x5a, 0x02, 0xc0, 0x4a, 0x01, 0x58, 0x11, 0xc0, 0xca, - 0x8b, 0x5d, 0xca, 0xb7, 0xfa, 0x9b, 0x96, 0xe3, 0xbb, 0x76, 0xd7, 0xef, 0xfa, 0xb6, 0xc4, 0xd9, - 0xec, 0xdf, 0x96, 0xff, 0xe4, 0x1f, 0xf9, 0x4b, 0xe1, 0xaf, 0x5c, 0xd4, 0x01, 0xe2, 0x80, 0xba, - 0xd8, 0xd9, 0xa2, 0x1e, 0x09, 0x77, 0xa2, 0x10, 0xed, 0x90, 0x30, 0xbf, 0x1f, 0x3a, 0x64, 0x3c, - 0xaa, 0x3d, 0xbd, 0x98, 0xed, 0x12, 0x8e, 0xed, 0x41, 0x2e, 0x97, 0x15, 0xbb, 0xc8, 0x2b, 0xec, - 0x7b, 0x9c, 0xba, 0x79, 0x9a, 0x57, 0x1f, 0xe7, 0xc0, 0x9c, 0x2d, 0xe2, 0xe2, 0x9c, 0xdf, 0xcb, - 0x45, 0x7e, 0x7d, 0x4e, 0x7b, 0x36, 0xf5, 0x38, 0xe3, 0x61, 0xce, 0xe9, 0x85, 0xc2, 0x56, 0x4d, - 0xca, 0xe5, 0xd2, 0x7e, 0x1b, 0x9b, 0x73, 0x35, 0xbf, 0x33, 0xc0, 0x99, 0x56, 0xe8, 0x33, 0x76, - 0x8b, 0x84, 0x8c, 0xfa, 0xde, 0xb5, 0xcd, 0xcf, 0x89, 0xc3, 0x11, 0xb9, 0x4d, 0x42, 0xe2, 0x39, - 0x04, 0xae, 0x82, 0xf2, 0x36, 0xf5, 0x3a, 0x55, 0x63, 0xd5, 0x38, 0xf7, 0xbf, 0xe6, 0xe2, 0xbd, - 0x61, 0x7d, 0x66, 0x34, 0xac, 0x97, 0xdf, 0xa7, 0x5e, 0x07, 0xc9, 0x1d, 0x61, 0xe1, 0x61, 0x97, - 0x54, 0x4b, 0x59, 0x8b, 0xab, 0xd8, 0x25, 0x48, 0xee, 0xc0, 0x06, 0x00, 0x38, 0xa0, 0x9a, 0xa0, - 0x3a, 0x2b, 0xed, 0xa0, 0xb6, 0x03, 0x6b, 0xd7, 0xaf, 0xe8, 0x1d, 0x94, 0xb2, 0x32, 0x1f, 0x95, - 0xc0, 0xa9, 0xcb, 0x7e, 0x48, 0xef, 0xf8, 0x1e, 0xc7, 0xbd, 0xeb, 0x7e, 0x67, 0x4d, 0xe7, 0x41, - 0x42, 0xf8, 0x19, 0x58, 0x10, 0x5d, 0xed, 0x60, 0x8e, 0x65, 0x5c, 0x95, 0xc6, 0x4b, 0x96, 0x56, - 0x66, 0xba, 0xc8, 0x89, 0x36, 0x85, 0xb5, 0x35, 0xb8, 0x60, 0xa9, 0xe4, 0xda, 0x84, 0xe3, 0x84, - 0x3f, 0x59, 0x43, 0x31, 0x2a, 0xf4, 0x40, 0x99, 0x05, 0xc4, 0x91, 0x39, 0x55, 0x1a, 0xeb, 0xd6, - 0x94, 0xba, 0xb7, 0x0a, 0x22, 0xbf, 0x19, 0x10, 0x27, 0xa9, 0x90, 0xf8, 0x87, 0x24, 0x0f, 0x1c, - 0x80, 0x79, 0xc6, 0x31, 0xef, 0x33, 0x59, 0x9d, 0x4a, 0xe3, 0xea, 0xa1, 0x31, 0x4a, 0xd4, 0xe6, - 0x92, 0xe6, 0x9c, 0x57, 0xff, 0x91, 0x66, 0x33, 0xbf, 0x99, 0x05, 0xab, 0x05, 0x9e, 0x2d, 0xdf, - 0xeb, 0x50, 0x4e, 0x7d, 0x0f, 0x5e, 0x06, 0x65, 
0xbe, 0x13, 0x10, 0x2d, 0x81, 0x8b, 0x51, 0xf8, - 0x1b, 0x3b, 0x01, 0xd9, 0x1d, 0xd6, 0x9f, 0x7d, 0x9c, 0xbf, 0xb0, 0x43, 0x12, 0x01, 0xde, 0x8a, - 0xd3, 0x54, 0x62, 0x79, 0x33, 0x1b, 0xd6, 0xee, 0xb0, 0xbe, 0xa7, 0xee, 0xad, 0x18, 0x33, 0x9b, - 0x06, 0x1c, 0x00, 0xd8, 0xc3, 0x8c, 0x6f, 0x84, 0xd8, 0x63, 0x8a, 0x93, 0xba, 0x44, 0x97, 0xf2, - 0xf9, 0xfd, 0x49, 0x43, 0x78, 0x34, 0x57, 0x74, 0x3c, 0x70, 0x3d, 0x87, 0x86, 0x26, 0x30, 0xc0, - 0xe7, 0xc0, 0x7c, 0x48, 0x30, 0xf3, 0xbd, 0x6a, 0x59, 0xe6, 0x13, 0x97, 0x19, 0xc9, 0x55, 0xa4, - 0x77, 0xe1, 0x79, 0x70, 0xc4, 0x25, 0x8c, 0xe1, 0x2e, 0xa9, 0xce, 0x49, 0xc3, 0x65, 0x6d, 0x78, - 0xa4, 0xad, 0x96, 0x51, 0xb4, 0x6f, 0xfe, 0x6e, 0x80, 0x33, 0x05, 0x15, 0x5d, 0xa7, 0x8c, 0xc3, - 0x4f, 0x72, 0xda, 0xb7, 0xf6, 0x97, 0xa0, 0xf0, 0x96, 0xca, 0x3f, 0xa6, 0xb9, 0x17, 0xa2, 0x95, - 0x94, 0xee, 0x5d, 0x30, 0x47, 0x39, 0x71, 0x45, 0x7f, 0x66, 0xcf, 0x55, 0x1a, 0x97, 0x0f, 0x4b, - 0x86, 0xcd, 0xa3, 0x9a, 0x74, 0xee, 0x8a, 0x80, 0x47, 0x8a, 0xc5, 0xfc, 0xb3, 0x54, 0x98, 0xac, - 0x38, 0x1c, 0xf0, 0x6b, 0x03, 0x2c, 0xc9, 0xbf, 0x1b, 0x38, 0xec, 0x12, 0x31, 0x95, 0x74, 0xce, - 0xd3, 0x9f, 0xc8, 0x3d, 0x66, 0x5c, 0xf3, 0xa4, 0x0e, 0x6e, 0xe9, 0x66, 0x86, 0x0b, 0x8d, 0x71, - 0xc3, 0x0b, 0xa0, 0xe2, 0x52, 0x0f, 0x91, 0xa0, 0x47, 0x1d, 0xac, 0x34, 0x3c, 0xd7, 0x5c, 0x1e, - 0x0d, 0xeb, 0x95, 0x76, 0xb2, 0x8c, 0xd2, 0x36, 0xf0, 0x15, 0x50, 0x71, 0xf1, 0x17, 0xb1, 0xcb, - 0xac, 0x74, 0x39, 0xae, 0xf9, 0x2a, 0xed, 0x64, 0x0b, 0xa5, 0xed, 0xe0, 0x6d, 0x21, 0x18, 0x1e, - 0x52, 0x87, 0x55, 0xcb, 0xb2, 0x13, 0xaf, 0x4f, 0x9d, 0x70, 0x5b, 0xfa, 0xcb, 0x89, 0x93, 0x52, - 0x9b, 0xc4, 0x44, 0x11, 0xb8, 0xf9, 0x6b, 0x19, 0x9c, 0xdd, 0x73, 0x72, 0xc0, 0x77, 0x01, 0xf4, - 0x37, 0x19, 0x09, 0x07, 0xa4, 0xf3, 0x9e, 0xba, 0x3a, 0xc4, 0x0c, 0x17, 0x5d, 0x98, 0x6d, 0x9e, - 0x14, 0x47, 0xe5, 0x5a, 0x6e, 0x17, 0x4d, 0xf0, 0x80, 0x0e, 0x38, 0x2a, 0x0e, 0x90, 0xaa, 0x30, - 0xd5, 0xd7, 0xc5, 0x74, 0xa7, 0xf3, 0xff, 0xa3, 0x61, 0xfd, 0xe8, 0x7a, 0x1a, 0x04, 
0x65, 0x31, - 0xe1, 0x1a, 0x58, 0x76, 0xfa, 0x61, 0x48, 0x3c, 0x3e, 0x56, 0xf1, 0x53, 0xba, 0x02, 0xcb, 0xad, - 0xec, 0x36, 0x1a, 0xb7, 0x17, 0x10, 0x1d, 0xc2, 0x68, 0x48, 0x3a, 0x31, 0x44, 0x39, 0x0b, 0xf1, - 0x76, 0x76, 0x1b, 0x8d, 0xdb, 0xc3, 0xbb, 0x60, 0x49, 0xa3, 0xea, 0x7a, 0x57, 0xe7, 0x64, 0x0f, - 0xdf, 0x38, 0x68, 0x0f, 0xd5, 0x0c, 0x8f, 0x55, 0xda, 0xca, 0x80, 0xa3, 0x31, 0x32, 0xf8, 0x95, - 0x01, 0x80, 0x13, 0x0d, 0x4a, 0x56, 0x9d, 0x97, 0xdc, 0x37, 0x0e, 0xeb, 0x24, 0xc7, 0x23, 0x38, - 0xb9, 0x41, 0xe3, 0x25, 0x86, 0x52, 0xc4, 0xe6, 0x1f, 0x25, 0x00, 0x12, 0x11, 0xc2, 0x8b, 0x99, - 0x5b, 0x64, 0x75, 0xec, 0x16, 0x39, 0xa6, 0x2d, 0xe5, 0x0b, 0x2f, 0x75, 0x63, 0x74, 0xc1, 0xbc, - 0x2f, 0x4f, 0xab, 0xd6, 0x4b, 0x6b, 0xea, 0x3c, 0xe2, 0xfb, 0x3d, 0x86, 0x6f, 0x02, 0x31, 0xa2, - 0xf5, 0x10, 0xd0, 0xf0, 0xf0, 0x53, 0x50, 0x0e, 0xfc, 0x4e, 0x74, 0xff, 0xae, 0x4d, 0x4d, 0x73, - 0xdd, 0xef, 0xb0, 0x0c, 0xc9, 0x82, 0xc8, 0x4e, 0xac, 0x22, 0x09, 0x0c, 0x7d, 0xb0, 0x10, 0xbd, - 0x60, 0xa5, 0xa2, 0x2a, 0x8d, 0x77, 0xa6, 0x26, 0x41, 0x1a, 0x20, 0x43, 0xb4, 0x28, 0x66, 0x79, - 0xb4, 0x83, 0x62, 0x12, 0xf3, 0xaf, 0x12, 0x58, 0x4c, 0x0b, 0xe8, 0xdf, 0xd1, 0x01, 0xa5, 0xe5, - 0x27, 0xdc, 0x01, 0x45, 0xf2, 0x14, 0x3a, 0xa0, 0x88, 0x8a, 0x3a, 0xf0, 0x7d, 0x09, 0xc0, 0xbc, - 0xfc, 0x20, 0x07, 0xf3, 0x5c, 0xde, 0x29, 0x4f, 0xe4, 0x32, 0x8b, 0xdf, 0x20, 0xfa, 0xde, 0xd2, - 0x5c, 0xe2, 0x11, 0xae, 0xa6, 0xfe, 0xd5, 0xe4, 0xb1, 0x1e, 0x1f, 0xe1, 0x76, 0xbc, 0x83, 0x52, - 0x56, 0x90, 0x80, 0x8a, 0xf2, 0xbe, 0x85, 0x7b, 0xfd, 0xe8, 0x41, 0xb5, 0xe7, 0x7b, 0xc3, 0x8a, - 0x92, 0xb7, 0x6e, 0xf4, 0xb1, 0xc7, 0x29, 0xdf, 0x49, 0x6e, 0xbb, 0x8d, 0x04, 0x0a, 0xa5, 0x71, - 0xcd, 0x1f, 0xc6, 0xeb, 0xa4, 0xf4, 0xfa, 0xdf, 0xa9, 0xd3, 0x16, 0x58, 0xd4, 0x43, 0xf8, 0x9f, - 0x14, 0xea, 0x84, 0x66, 0x59, 0x6c, 0xa5, 0xb0, 0x50, 0x06, 0xd9, 0xfc, 0xd9, 0x00, 0xc7, 0xc6, - 0x47, 0xcd, 0x58, 0xc8, 0xc6, 0xbe, 0x42, 0xbe, 0x03, 0xa0, 0x4a, 0x78, 0x6d, 0x40, 0x42, 0xdc, - 0x25, 0x2a, 0xf0, 0xd2, 
0x81, 0x02, 0x8f, 0x9f, 0xcd, 0x1b, 0x39, 0x44, 0x34, 0x81, 0xc5, 0xfc, - 0x25, 0x9b, 0x84, 0xea, 0xf6, 0x41, 0x92, 0xb8, 0x0b, 0x8e, 0xeb, 0xea, 0x1c, 0x42, 0x16, 0x67, - 0x34, 0xd9, 0xf1, 0x56, 0x1e, 0x12, 0x4d, 0xe2, 0x31, 0x7f, 0x2c, 0x81, 0x13, 0x93, 0x46, 0x32, - 0x6c, 0xeb, 0x4f, 0x62, 0x95, 0xc5, 0xa5, 0xf4, 0x27, 0xf1, 0xee, 0xb0, 0x7e, 0x7e, 0xcf, 0x6f, - 0x9c, 0x08, 0x30, 0xf5, 0xfd, 0xfc, 0x21, 0xa8, 0x66, 0xaa, 0xf8, 0x01, 0xa7, 0x3d, 0x7a, 0x47, - 0xbd, 0xc4, 0xd4, 0x23, 0xf4, 0x99, 0xd1, 0xb0, 0x5e, 0xdd, 0x28, 0xb0, 0x41, 0x85, 0xde, 0xe2, - 0xc3, 0x69, 0x82, 0x0a, 0x0e, 0x26, 0xdf, 0x93, 0x53, 0x28, 0xe0, 0xa7, 0x7c, 0xe5, 0x94, 0x0a, - 0x0e, 0xb9, 0x72, 0x1f, 0x83, 0xd3, 0xd9, 0xc6, 0xe5, 0x4b, 0x77, 0x76, 0x34, 0xac, 0x9f, 0x6e, - 0x15, 0x19, 0xa1, 0x62, 0xff, 0x22, 0xf5, 0xcd, 0x3e, 0x1d, 0xf5, 0x35, 0xad, 0x7b, 0x0f, 0x6b, - 0x33, 0xf7, 0x1f, 0xd6, 0x66, 0x1e, 0x3c, 0xac, 0xcd, 0x7c, 0x39, 0xaa, 0x19, 0xf7, 0x46, 0x35, - 0xe3, 0xfe, 0xa8, 0x66, 0x3c, 0x18, 0xd5, 0x8c, 0xdf, 0x46, 0x35, 0xe3, 0xdb, 0x47, 0xb5, 0x99, - 0x8f, 0x16, 0xa2, 0x61, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xa9, 0x91, 0xe9, 0xfe, - 0x13, 0x00, 0x00, + 0x14, 0xce, 0x3a, 0x4e, 0x1a, 0xc6, 0x69, 0x52, 0xa6, 0x55, 0xeb, 0xa6, 0xd4, 0x8e, 0x56, 0x08, + 0xb5, 0x08, 0x76, 0xa9, 0x29, 0x08, 0x84, 0x00, 0xc5, 0xe6, 0xd2, 0x8a, 0xb8, 0x97, 0x69, 0xa8, + 0x10, 0x20, 0xc1, 0x64, 0x3d, 0x75, 0x86, 0x78, 0x2f, 0xda, 0x19, 0x5b, 0xa4, 0x52, 0x25, 0x5e, + 0x78, 0x43, 0x82, 0x17, 0x7e, 0x02, 0x12, 0xff, 0x80, 0x67, 0x90, 0x90, 0xfa, 0xd8, 0xc7, 0xf2, + 0x62, 0x51, 0xf7, 0x8d, 0x9f, 0x50, 0x89, 0x8b, 0xe6, 0xb2, 0x37, 0xaf, 0xd7, 0xad, 0x43, 0x5a, + 0xc1, 0x9b, 0x3d, 0x73, 0xce, 0xf7, 0x9d, 0xcb, 0x37, 0x67, 0x66, 0xc1, 0xdb, 0xbb, 0xaf, 0x31, + 0x8b, 0xfa, 0xf6, 0x6e, 0x7f, 0x9b, 0x84, 0x1e, 0xe1, 0x84, 0xd9, 0xc1, 0x6e, 0xd7, 0xc6, 0x01, + 0x65, 0x36, 0xee, 0x73, 0x9f, 0x39, 0xb8, 0x47, 0xbd, 0xae, 0x3d, 0x68, 0xe0, 0x5e, 0xb0, 0x83, + 0xcf, 0xd9, 0x5d, 0xe2, 0x91, 0x10, 0x73, 
0xd2, 0xb1, 0x82, 0xd0, 0xe7, 0x3e, 0xb4, 0x15, 0x80, + 0x95, 0x00, 0x58, 0xc1, 0x6e, 0xd7, 0x12, 0x00, 0x56, 0x0a, 0xc0, 0x8a, 0x00, 0xd6, 0x5e, 0xec, + 0x52, 0xbe, 0xd3, 0xdf, 0xb6, 0x1c, 0xdf, 0xb5, 0xbb, 0x7e, 0xd7, 0xb7, 0x25, 0xce, 0x76, 0xff, + 0x86, 0xfc, 0x27, 0xff, 0xc8, 0x5f, 0x0a, 0x7f, 0xed, 0xbc, 0x0e, 0x10, 0x07, 0xd4, 0xc5, 0xce, + 0x0e, 0xf5, 0x48, 0xb8, 0x17, 0x85, 0x68, 0x87, 0x84, 0xf9, 0xfd, 0xd0, 0x21, 0xe3, 0x51, 0x4d, + 0xf5, 0x62, 0xb6, 0x4b, 0x38, 0xb6, 0x07, 0xb9, 0x5c, 0xd6, 0xec, 0x22, 0xaf, 0xb0, 0xef, 0x71, + 0xea, 0xe6, 0x69, 0x5e, 0x7d, 0x98, 0x03, 0x73, 0x76, 0x88, 0x8b, 0x73, 0x7e, 0x2f, 0x17, 0xf9, + 0xf5, 0x39, 0xed, 0xd9, 0xd4, 0xe3, 0x8c, 0x87, 0x39, 0xa7, 0x17, 0x0a, 0x5b, 0x35, 0x21, 0x17, + 0xf3, 0x7b, 0x03, 0x9c, 0x6a, 0x85, 0x3e, 0x63, 0xd7, 0x49, 0xc8, 0xa8, 0xef, 0x5d, 0xde, 0xfe, + 0x82, 0x38, 0x1c, 0x91, 0x1b, 0x24, 0x24, 0x9e, 0x43, 0xe0, 0x3a, 0x28, 0xef, 0x52, 0xaf, 0x53, + 0x35, 0xd6, 0x8d, 0x33, 0x4f, 0x35, 0x97, 0x6f, 0x0f, 0xeb, 0x73, 0xa3, 0x61, 0xbd, 0xfc, 0x01, + 0xf5, 0x3a, 0x48, 0xee, 0x08, 0x0b, 0x0f, 0xbb, 0xa4, 0x5a, 0xca, 0x5a, 0x5c, 0xc2, 0x2e, 0x41, + 0x72, 0x07, 0x36, 0x00, 0xc0, 0x01, 0xd5, 0x04, 0xd5, 0x79, 0x69, 0x07, 0xb5, 0x1d, 0xd8, 0xb8, + 0x72, 0x51, 0xef, 0xa0, 0x94, 0x95, 0x79, 0xbf, 0x04, 0x4e, 0x5c, 0xf0, 0x43, 0x7a, 0xd3, 0xf7, + 0x38, 0xee, 0x5d, 0xf1, 0x3b, 0x1b, 0x5a, 0x24, 0x24, 0x84, 0x9f, 0x83, 0x25, 0xd1, 0x9a, 0x0e, + 0xe6, 0x58, 0xc6, 0x55, 0x69, 0xbc, 0x64, 0x69, 0x79, 0xa5, 0x2b, 0x95, 0x08, 0x4c, 0x58, 0x5b, + 0x83, 0x73, 0x96, 0x4a, 0xae, 0x4d, 0x38, 0x4e, 0xf8, 0x93, 0x35, 0x14, 0xa3, 0x42, 0x0f, 0x94, + 0x59, 0x40, 0x1c, 0x99, 0x53, 0xa5, 0xb1, 0x69, 0xcd, 0x28, 0x5e, 0xab, 0x20, 0xf2, 0x6b, 0x01, + 0x71, 0x92, 0x0a, 0x89, 0x7f, 0x48, 0xf2, 0xc0, 0x01, 0x58, 0x64, 0x1c, 0xf3, 0x3e, 0x93, 0xd5, + 0xa9, 0x34, 0x2e, 0x1d, 0x18, 0xa3, 0x44, 0x6d, 0xae, 0x68, 0xce, 0x45, 0xf5, 0x1f, 0x69, 0x36, + 0xf3, 0xdb, 0x79, 0xb0, 0x5e, 0xe0, 0xd9, 0xf2, 0xbd, 0x0e, 0xe5, 0xd4, 0xf7, 
0xe0, 0x05, 0x50, + 0xe6, 0x7b, 0x01, 0xd1, 0x12, 0x38, 0x1f, 0x85, 0xbf, 0xb5, 0x17, 0x90, 0x07, 0xc3, 0xfa, 0xb3, + 0x0f, 0xf3, 0x17, 0x76, 0x48, 0x22, 0xc0, 0xeb, 0x71, 0x9a, 0x4a, 0x2c, 0x6f, 0x65, 0xc3, 0x7a, + 0x30, 0xac, 0x4f, 0x15, 0xaf, 0x15, 0x63, 0x66, 0xd3, 0x80, 0x03, 0x00, 0x7b, 0x98, 0xf1, 0xad, + 0x10, 0x7b, 0x4c, 0x71, 0x52, 0x97, 0xe8, 0x52, 0x3e, 0xff, 0x68, 0xd2, 0x10, 0x1e, 0xcd, 0x35, + 0x1d, 0x0f, 0xdc, 0xcc, 0xa1, 0xa1, 0x09, 0x0c, 0xf0, 0x39, 0xb0, 0x18, 0x12, 0xcc, 0x7c, 0xaf, + 0x5a, 0x96, 0xf9, 0xc4, 0x65, 0x46, 0x72, 0x15, 0xe9, 0x5d, 0x78, 0x16, 0x1c, 0x72, 0x09, 0x63, + 0xb8, 0x4b, 0xaa, 0x0b, 0xd2, 0x70, 0x55, 0x1b, 0x1e, 0x6a, 0xab, 0x65, 0x14, 0xed, 0x9b, 0x7f, + 0x18, 0xe0, 0x54, 0x41, 0x45, 0x37, 0x29, 0xe3, 0xf0, 0xd3, 0x9c, 0xf6, 0xad, 0x47, 0x4b, 0x50, + 0x78, 0x4b, 0xe5, 0x1f, 0xd1, 0xdc, 0x4b, 0xd1, 0x4a, 0x4a, 0xf7, 0x2e, 0x58, 0xa0, 0x9c, 0xb8, + 0xa2, 0x3f, 0xf3, 0x67, 0x2a, 0x8d, 0x0b, 0x07, 0x25, 0xc3, 0xe6, 0x61, 0x4d, 0xba, 0x70, 0x51, + 0xc0, 0x23, 0xc5, 0x62, 0xfe, 0x55, 0x2a, 0x4c, 0x56, 0x1c, 0x0e, 0xf8, 0x8d, 0x01, 0x56, 0xe4, + 0xdf, 0x2d, 0x1c, 0x76, 0x89, 0x98, 0x4a, 0x3a, 0xe7, 0xd9, 0x4f, 0xe4, 0x94, 0x19, 0xd7, 0x3c, + 0xae, 0x83, 0x5b, 0xb9, 0x96, 0xe1, 0x42, 0x63, 0xdc, 0xf0, 0x1c, 0xa8, 0xb8, 0xd4, 0x43, 0x24, + 0xe8, 0x51, 0x07, 0x2b, 0x0d, 0x2f, 0x34, 0x57, 0x47, 0xc3, 0x7a, 0xa5, 0x9d, 0x2c, 0xa3, 0xb4, + 0x0d, 0x7c, 0x05, 0x54, 0x5c, 0xfc, 0x65, 0xec, 0x32, 0x2f, 0x5d, 0x8e, 0x6a, 0xbe, 0x4a, 0x3b, + 0xd9, 0x42, 0x69, 0x3b, 0x78, 0x43, 0x08, 0x86, 0x87, 0xd4, 0x61, 0xd5, 0xb2, 0xec, 0xc4, 0x1b, + 0x33, 0x27, 0xdc, 0x96, 0xfe, 0x72, 0xe2, 0xa4, 0xd4, 0x26, 0x31, 0x51, 0x04, 0x6e, 0xfe, 0x56, + 0x06, 0xa7, 0xa7, 0x4e, 0x0e, 0xf8, 0x1e, 0x80, 0xfe, 0x36, 0x23, 0xe1, 0x80, 0x74, 0xde, 0x57, + 0x57, 0x87, 0x98, 0xe1, 0xa2, 0x0b, 0xf3, 0xcd, 0xe3, 0xe2, 0xa8, 0x5c, 0xce, 0xed, 0xa2, 0x09, + 0x1e, 0xd0, 0x01, 0x87, 0xc5, 0x01, 0x52, 0x15, 0xa6, 0xfa, 0xba, 0x98, 0xed, 0x74, 0x3e, 0x3d, + 0x1a, 0xd6, 0x0f, 
0x6f, 0xa6, 0x41, 0x50, 0x16, 0x13, 0x6e, 0x80, 0x55, 0xa7, 0x1f, 0x86, 0xc4, + 0xe3, 0x63, 0x15, 0x3f, 0xa1, 0x2b, 0xb0, 0xda, 0xca, 0x6e, 0xa3, 0x71, 0x7b, 0x01, 0xd1, 0x21, + 0x8c, 0x86, 0xa4, 0x13, 0x43, 0x94, 0xb3, 0x10, 0xef, 0x64, 0xb7, 0xd1, 0xb8, 0x3d, 0xbc, 0x05, + 0x56, 0x34, 0xaa, 0xae, 0x77, 0x75, 0x41, 0xf6, 0xf0, 0xcd, 0xfd, 0xf6, 0x50, 0xcd, 0xf0, 0x58, + 0xa5, 0xad, 0x0c, 0x38, 0x1a, 0x23, 0x83, 0x5f, 0x1b, 0x00, 0x38, 0xd1, 0xa0, 0x64, 0xd5, 0x45, + 0xc9, 0x7d, 0xf5, 0xa0, 0x4e, 0x72, 0x3c, 0x82, 0x93, 0x1b, 0x34, 0x5e, 0x62, 0x28, 0x45, 0x6c, + 0xfe, 0x59, 0x02, 0x20, 0x11, 0x21, 0x3c, 0x9f, 0xb9, 0x45, 0xd6, 0xc7, 0x6e, 0x91, 0x23, 0xda, + 0x52, 0x3e, 0xd3, 0x52, 0x37, 0x46, 0x17, 0x2c, 0xfa, 0xf2, 0xb4, 0x6a, 0xbd, 0xb4, 0x66, 0xce, + 0x23, 0xbe, 0xdf, 0x63, 0xf8, 0x26, 0x10, 0x23, 0x5a, 0x0f, 0x01, 0x0d, 0x0f, 0x3f, 0x03, 0xe5, + 0xc0, 0xef, 0x44, 0xf7, 0xef, 0xc6, 0xcc, 0x34, 0x57, 0xfc, 0x0e, 0xcb, 0x90, 0x2c, 0x89, 0xec, + 0xc4, 0x2a, 0x92, 0xc0, 0xd0, 0x07, 0x4b, 0xd1, 0x33, 0x54, 0x2a, 0xaa, 0xd2, 0x78, 0x77, 0x66, + 0x12, 0xa4, 0x01, 0x32, 0x44, 0xcb, 0x62, 0x96, 0x47, 0x3b, 0x28, 0x26, 0x31, 0xff, 0x2e, 0x81, + 0xe5, 0xb4, 0x80, 0xfe, 0x1b, 0x1d, 0x50, 0x5a, 0x7e, 0xcc, 0x1d, 0x50, 0x24, 0x4f, 0xa0, 0x03, + 0x8a, 0xa8, 0xa8, 0x03, 0x3f, 0x94, 0x00, 0xcc, 0xcb, 0x0f, 0x72, 0xb0, 0xc8, 0xe5, 0x9d, 0xf2, + 0x58, 0x2e, 0xb3, 0xf8, 0x0d, 0xa2, 0xef, 0x2d, 0xcd, 0x25, 0x1e, 0xe1, 0x6a, 0xea, 0x5f, 0x4a, + 0x1e, 0xeb, 0xf1, 0x11, 0x6e, 0xc7, 0x3b, 0x28, 0x65, 0x05, 0x09, 0xa8, 0x28, 0xef, 0xeb, 0xb8, + 0xd7, 0x8f, 0x1e, 0x54, 0x53, 0xdf, 0x1b, 0x56, 0x94, 0xbc, 0x75, 0xb5, 0x8f, 0x3d, 0x4e, 0xf9, + 0x5e, 0x72, 0xdb, 0x6d, 0x25, 0x50, 0x28, 0x8d, 0x6b, 0xfe, 0x38, 0x5e, 0x27, 0xa5, 0xd7, 0xff, + 0x4f, 0x9d, 0x76, 0xc0, 0xb2, 0x1e, 0xc2, 0xff, 0xa6, 0x50, 0xc7, 0x34, 0xcb, 0x72, 0x2b, 0x85, + 0x85, 0x32, 0xc8, 0xe6, 0x2f, 0x06, 0x38, 0x32, 0x3e, 0x6a, 0xc6, 0x42, 0x36, 0x1e, 0x29, 0xe4, + 0x9b, 0x00, 0xaa, 0x84, 0x37, 0x06, 0x24, 0xc4, 0x5d, 
0xa2, 0x02, 0x2f, 0xed, 0x2b, 0xf0, 0xf8, + 0xd9, 0xbc, 0x95, 0x43, 0x44, 0x13, 0x58, 0xcc, 0x5f, 0xb3, 0x49, 0xa8, 0x6e, 0xef, 0x27, 0x89, + 0x5b, 0xe0, 0xa8, 0xae, 0xce, 0x01, 0x64, 0x71, 0x4a, 0x93, 0x1d, 0x6d, 0xe5, 0x21, 0xd1, 0x24, + 0x1e, 0xf3, 0xa7, 0x12, 0x38, 0x36, 0x69, 0x24, 0xc3, 0xb6, 0xfe, 0x24, 0x56, 0x59, 0xbc, 0x9e, + 0xfe, 0x24, 0x7e, 0x30, 0xac, 0x9f, 0x9d, 0xfa, 0x8d, 0x13, 0x01, 0xa6, 0xbe, 0x9f, 0x3f, 0x02, + 0xd5, 0x4c, 0x15, 0x3f, 0xe4, 0xb4, 0x47, 0x6f, 0xaa, 0x97, 0x98, 0x7a, 0x84, 0x3e, 0x33, 0x1a, + 0xd6, 0xab, 0x5b, 0x05, 0x36, 0xa8, 0xd0, 0x5b, 0x7c, 0x38, 0x4d, 0x50, 0xc1, 0xfe, 0xe4, 0x7b, + 0x7c, 0x06, 0x05, 0xfc, 0x9c, 0xaf, 0x9c, 0x52, 0xc1, 0x01, 0x57, 0xee, 0x13, 0x70, 0x32, 0xdb, + 0xb8, 0x7c, 0xe9, 0x4e, 0x8f, 0x86, 0xf5, 0x93, 0xad, 0x22, 0x23, 0x54, 0xec, 0x5f, 0xa4, 0xbe, + 0xf9, 0x27, 0xa3, 0xbe, 0xa6, 0x75, 0xfb, 0x5e, 0x6d, 0xee, 0xce, 0xbd, 0xda, 0xdc, 0xdd, 0x7b, + 0xb5, 0xb9, 0xaf, 0x46, 0x35, 0xe3, 0xf6, 0xa8, 0x66, 0xdc, 0x19, 0xd5, 0x8c, 0xbb, 0xa3, 0x9a, + 0xf1, 0xfb, 0xa8, 0x66, 0x7c, 0x77, 0xbf, 0x36, 0xf7, 0xf1, 0x52, 0x34, 0x0c, 0xff, 0x09, 0x00, + 0x00, 0xff, 0xff, 0x88, 0x5a, 0x1f, 0xc3, 0xc3, 0x13, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto index d51547584f1e..c2223a1188c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto @@ -27,7 +27,6 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; // Package-wide variables from generator "generated". 
option go_package = "v2alpha1"; diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go index 55f485b6d0db..a15d9b32ba89 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go index 87c6062c2d42..a372c1eea987 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go index 8f7c692ef7fe..88f1a825bffd 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go index a4f925d6b4f6..7d02dff118f3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -370,7 +370,7 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { obj.HairpinMode = PromiscuousBridge } if obj.EvictionHard == nil { - temp := "memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%" + temp := "memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%" obj.EvictionHard = &temp } if obj.EvictionPressureTransitionPeriod == zeroDuration { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index cb49f9583b37..9b1527fae37e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go index 1366b2f87dc8..c39fea4aae1c 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go index 73ed112a8ea9..00cf27c7b70e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index 5c8256c2dc10..0efd6727a297 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go index 528a9bc52180..cb0e0506d742 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation.go @@ -38,7 +38,7 @@ func ValidatePodDisruptionBudgetUpdate(pdb, oldPdb *policy.PodDisruptionBudget) restoreGeneration := pdb.Generation pdb.Generation = oldPdb.Generation - if !reflect.DeepEqual(pdb, oldPdb) { + if !reflect.DeepEqual(pdb.Spec, oldPdb.Spec) { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to poddisruptionbudget spec are forbidden.")) } allErrs = append(allErrs, ValidatePodDisruptionBudgetStatus(pdb.Status, field.NewPath("status"))...) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go index a11454374c36..c2ba5b61fbe0 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/policy/validation/validation_test.go @@ -113,3 +113,111 @@ func TestValidatePodDisruptionBudgetStatus(t *testing.T) { } } } + +func TestValidatePodDisruptionBudgetUpdate(t *testing.T) { + c1 := intstr.FromString("10%") + c2 := intstr.FromInt(1) + c3 := intstr.FromInt(2) + oldPdb := &policy.PodDisruptionBudget{} + pdb := &policy.PodDisruptionBudget{} + testCases := []struct { + generations []int64 + name string + specs []policy.PodDisruptionBudgetSpec + status []policy.PodDisruptionBudgetStatus + ok bool + }{ + { + name: "only update status", + generations: []int64{int64(2), int64(3)}, + specs: []policy.PodDisruptionBudgetSpec{ + { + MinAvailable: &c1, + MaxUnavailable: &c2, + }, + { + MinAvailable: &c1, + MaxUnavailable: &c2, + }, + }, + status: []policy.PodDisruptionBudgetStatus{ + { + PodDisruptionsAllowed: 10, + CurrentHealthy: 5, + ExpectedPods: 2, + }, + { 
+ PodDisruptionsAllowed: 8, + CurrentHealthy: 5, + DesiredHealthy: 3, + }, + }, + ok: true, + }, + { + name: "only update pdb spec", + generations: []int64{int64(2), int64(3)}, + specs: []policy.PodDisruptionBudgetSpec{ + { + MaxUnavailable: &c2, + }, + { + MinAvailable: &c1, + MaxUnavailable: &c3, + }, + }, + status: []policy.PodDisruptionBudgetStatus{ + { + PodDisruptionsAllowed: 10, + }, + { + PodDisruptionsAllowed: 10, + }, + }, + ok: false, + }, + { + name: "update spec and status", + generations: []int64{int64(2), int64(3)}, + specs: []policy.PodDisruptionBudgetSpec{ + { + MaxUnavailable: &c2, + }, + { + MinAvailable: &c1, + MaxUnavailable: &c3, + }, + }, + status: []policy.PodDisruptionBudgetStatus{ + { + PodDisruptionsAllowed: 10, + CurrentHealthy: 5, + ExpectedPods: 2, + }, + { + PodDisruptionsAllowed: 8, + CurrentHealthy: 5, + DesiredHealthy: 3, + }, + }, + ok: false, + }, + } + + for i, tc := range testCases { + oldPdb.Spec = tc.specs[0] + oldPdb.Generation = tc.generations[0] + oldPdb.Status = tc.status[0] + + pdb.Spec = tc.specs[1] + pdb.Generation = tc.generations[1] + oldPdb.Status = tc.status[1] + + errs := ValidatePodDisruptionBudgetUpdate(oldPdb, pdb) + if tc.ok && len(errs) > 0 { + t.Errorf("[%d:%s] unexpected errors: %v", i, tc.name, errs) + } else if !tc.ok && len(errs) == 0 { + t.Errorf("[%d:%s] expected errors: %v", i, tc.name, errs) + } + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/.import-restrictions b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/.import-restrictions index 89c2cc64e2db..9fdb69002710 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/.import-restrictions +++ b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/.import-restrictions @@ -8,4 +8,3 @@ } ] } - diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go index 962c6a0fc72c..00ce9d8dcd58 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/aws/aws.go @@ -2077,6 +2077,11 @@ func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) b // Returns true if and only if changes were made // The security group must already exist func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPermissionSet) (bool, error) { + // We do not want to make changes to the Global defined SG + if securityGroupID == c.cfg.Global.ElbSecurityGroup { + return false, nil + } + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warningf("Error retrieving security group %q", err) @@ -2147,6 +2152,11 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe // Returns true if and only if changes were made // The security group must already exist func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions []*ec2.IpPermission) (bool, error) { + // We do not want to make changes to the Global defined SG + if securityGroupID == c.cfg.Global.ElbSecurityGroup { + return false, nil + } + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warningf("Error retrieving security group: %q", err) @@ -2203,6 +2213,11 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ // Returns true if and only if changes were made // If the security group no longer exists, will return (false, nil) func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermissions []*ec2.IpPermission) (bool, error) { + // We do not want to make changes to the Global defined SG + if securityGroupID == c.cfg.Global.ElbSecurityGroup { + return false, nil + } + group, err := c.findSecurityGroup(securityGroupID) if err != nil { glog.Warningf("Error retrieving security group: %q", err) @@ -2530,7 +2545,7 @@ func getPortSets(annotation string) (ports *portSets) { // 
buildELBSecurityGroupList returns list of SecurityGroups which should be // attached to ELB created by a service. List always consist of at least -// 1 member which is an SG created for this service. Extra groups can be +// 1 member which is an SG created for this service or a SG from the Global config. Extra groups can be // specified via annotation func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, loadBalancerName, annotation string) ([]string, error) { var err error diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD index 37e23ace3810..2054172ac3a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/BUILD @@ -13,17 +13,18 @@ go_library( srcs = [ "azure.go", "azure_backoff.go", - "azure_blob.go", + "azure_blobDiskController.go", + "azure_controllerCommon.go", "azure_file.go", "azure_instances.go", "azure_loadbalancer.go", + "azure_managedDiskController.go", "azure_routes.go", "azure_storage.go", "azure_storageaccount.go", "azure_util.go", "azure_wrap.go", "azure_zones.go", - "vhd.go", ], tags = ["automanaged"], deps = [ @@ -34,15 +35,18 @@ go_library( "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", 
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", + "//vendor/golang.org/x/crypto/pkcs12:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go index 1b4e645dd97e..487e60dac3ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure.go @@ -17,6 +17,8 @@ limitations under the License. package azure import ( + "crypto/rsa" + "crypto/x509" "fmt" "io" "io/ioutil" @@ -28,12 +30,15 @@ import ( "k8s.io/kubernetes/pkg/version" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/ghodss/yaml" "github.com/golang/glog" + "golang.org/x/crypto/pkcs12" "k8s.io/apimachinery/pkg/util/wait" ) @@ -63,6 +68,8 @@ type Config struct { Location string `json:"location" yaml:"location"` // The name of the VNet that the cluster is deployed in VnetName string `json:"vnetName" yaml:"vnetName"` + // The name of the resource group that the Vnet is deployed in + VnetResourceGroup string `json:"vnetResourceGroup" yaml:"vnetResourceGroup"` // The name of the subnet that the cluster is deployed in SubnetName string `json:"subnetName" yaml:"subnetName"` // The name of the security group attached to the cluster's subnet @@ -80,6 +87,10 
@@ type Config struct { AADClientID string `json:"aadClientId" yaml:"aadClientId"` // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"` + // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPath string `json:"aadClientCertPath" yaml:"aadClientCertPath"` + // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs + AADClientCertPassword string `json:"aadClientCertPassword" yaml:"aadClientCertPassword"` // Enable exponential backoff to manage resource request retries CloudProviderBackoff bool `json:"cloudProviderBackoff" yaml:"cloudProviderBackoff"` // Backoff retry limit @@ -96,6 +107,12 @@ type Config struct { CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS" yaml:"cloudProviderRateLimitQPS"` // Rate limit Bucket Size CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket" yaml:"cloudProviderRateLimitBucket"` + + // Use instance metadata service where possible + UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"` + + // Use managed service identity for the virtual machine to access Azure ARM APIs + UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` } // Cloud holds the config and clients @@ -111,100 +128,149 @@ type Cloud struct { SecurityGroupsClient network.SecurityGroupsClient VirtualMachinesClient compute.VirtualMachinesClient StorageAccountClient storage.AccountsClient + DisksClient disk.DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff + + *BlobDiskController + *ManagedDiskController + *controllerCommon } func init() { cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud) } -// NewCloud returns a Cloud with initialized clients -func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { - var az 
Cloud - - configContents, err := ioutil.ReadAll(configReader) +// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and +// the private RSA key +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err) + } + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key") } - err = yaml.Unmarshal(configContents, &az) + + return certificate, rsaPrivateKey, nil +} + +// GetServicePrincipalToken creates a new service principal token based on the configuration +func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) if err != nil { - return nil, err + return nil, fmt.Errorf("creating the OAuth config: %v", err) } - if az.Cloud == "" { - az.Environment = azure.PublicCloud - } else { - az.Environment, err = azure.EnvironmentFromName(az.Cloud) + if config.UseManagedIdentityExtension { + glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + return adal.NewServicePrincipalTokenFromMSI( + *oauthConfig, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientSecret) > 0 { + glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") + return adal.NewServicePrincipalToken( + *oauthConfig, + config.AADClientID, + config.AADClientSecret, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + certData, err := ioutil.ReadFile(config.AADClientCertPath) + if err != 
nil { + return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) + } + certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) if err != nil { - return nil, err + return nil, fmt.Errorf("decoding the client certificate: %v", err) } + return adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + config.AADClientID, + certificate, + privateKey, + env.ServiceManagementEndpoint) } - oauthConfig, err := az.Environment.OAuthConfigForTenant(az.TenantID) + return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) +} + +// NewCloud returns a Cloud with initialized clients +func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { + config, env, err := ParseConfig(configReader) if err != nil { return nil, err } + az := Cloud{ + Config: *config, + Environment: *env, + } - servicePrincipalToken, err := azure.NewServicePrincipalToken( - *oauthConfig, - az.AADClientID, - az.AADClientSecret, - az.Environment.ServiceManagementEndpoint) + servicePrincipalToken, err := GetServicePrincipalToken(config, env) if err != nil { return nil, err } az.SubnetsClient = network.NewSubnetsClient(az.SubscriptionID) az.SubnetsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SubnetsClient.Authorizer = servicePrincipalToken + az.SubnetsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.SubnetsClient.PollingDelay = 5 * time.Second configureUserAgent(&az.SubnetsClient.Client) az.RouteTablesClient = network.NewRouteTablesClient(az.SubscriptionID) az.RouteTablesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RouteTablesClient.Authorizer = servicePrincipalToken + az.RouteTablesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.RouteTablesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.RouteTablesClient.Client) az.RoutesClient = network.NewRoutesClient(az.SubscriptionID) 
az.RoutesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.RoutesClient.Authorizer = servicePrincipalToken + az.RoutesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.RoutesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.RoutesClient.Client) az.InterfacesClient = network.NewInterfacesClient(az.SubscriptionID) az.InterfacesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.InterfacesClient.Authorizer = servicePrincipalToken + az.InterfacesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.InterfacesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.InterfacesClient.Client) az.LoadBalancerClient = network.NewLoadBalancersClient(az.SubscriptionID) az.LoadBalancerClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.LoadBalancerClient.Authorizer = servicePrincipalToken + az.LoadBalancerClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.LoadBalancerClient.PollingDelay = 5 * time.Second configureUserAgent(&az.LoadBalancerClient.Client) az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID) az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.VirtualMachinesClient.Authorizer = servicePrincipalToken + az.VirtualMachinesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.VirtualMachinesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.VirtualMachinesClient.Client) az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID) az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.PublicIPAddressesClient.Authorizer = servicePrincipalToken + az.PublicIPAddressesClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.PublicIPAddressesClient.PollingDelay = 5 * time.Second configureUserAgent(&az.PublicIPAddressesClient.Client) az.SecurityGroupsClient = network.NewSecurityGroupsClient(az.SubscriptionID) 
az.SecurityGroupsClient.BaseURI = az.Environment.ResourceManagerEndpoint - az.SecurityGroupsClient.Authorizer = servicePrincipalToken + az.SecurityGroupsClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) az.SecurityGroupsClient.PollingDelay = 5 * time.Second configureUserAgent(&az.SecurityGroupsClient.Client) az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) - az.StorageAccountClient.Authorizer = servicePrincipalToken + az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.StorageAccountClient.Client) + + az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) + az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.DisksClient.Client) // Conditionally configure rate limits if az.CloudProviderRateLimit { @@ -254,9 +320,37 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.CloudProviderBackoffJitter) } + if err := initDiskControllers(&az); err != nil { + return nil, err + } return &az, nil } +// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file +func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) { + var config Config + + configContents, err := ioutil.ReadAll(configReader) + if err != nil { + return nil, nil, err + } + err = yaml.Unmarshal(configContents, &config) + if err != nil { + return nil, nil, err + } + + var env azure.Environment + if config.Cloud == "" { + env = azure.PublicCloud + } else { + env, err = azure.EnvironmentFromName(config.Cloud) + if err != nil { + return nil, nil, err + } + } + return &config, &env, nil +} + // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} @@ -308,3 +402,42 @@ 
func configureUserAgent(client *autorest.Client) { k8sVersion := version.Get().GitVersion client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion) } + +func initDiskControllers(az *Cloud) error { + // Common controller contains the function + // needed by both blob disk and managed disk controllers + + common := &controllerCommon{ + aadResourceEndPoint: az.Environment.ServiceManagementEndpoint, + clientID: az.AADClientID, + clientSecret: az.AADClientSecret, + location: az.Location, + storageEndpointSuffix: az.Environment.StorageEndpointSuffix, + managementEndpoint: az.Environment.ResourceManagerEndpoint, + resourceGroup: az.ResourceGroup, + tenantID: az.TenantID, + tokenEndPoint: az.Environment.ActiveDirectoryEndpoint, + subscriptionID: az.SubscriptionID, + cloud: az, + } + + // BlobDiskController: contains the function needed to + // create/attach/detach/delete blob based (unmanaged disks) + blobController, err := newBlobDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error()) + } + + // ManagedDiskController: contains the functions needed to + // create/attach/detach/delete managed disks + managedController, err := newManagedDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error()) + } + + az.BlobDiskController = blobController + az.ManagedDiskController = managedController + az.controllerCommon = common + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go index 3fca4c49334b..839592f30352 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -47,8 +47,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name 
types.NodeName) (compute.Virtua func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -56,8 +58,10 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -65,8 +69,10 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -74,8 +80,10 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(pip network.PublicIPAddress) error { func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { return 
wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -83,7 +91,9 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + respChan, errChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + resp := <-respChan + err := <-errChan return processRetryResponse(resp, err) }) } @@ -92,7 +102,9 @@ func (az *Cloud) DeletePublicIPWithRetry(pipName string) error { func (az *Cloud) DeleteLBWithRetry(lbName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + resp := <-respChan + err := <-errChan return processRetryResponse(resp, err) }) } @@ -101,8 +113,10 @@ func (az *Cloud) DeleteLBWithRetry(lbName string) error { func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, 
routeTable, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -110,8 +124,10 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } @@ -119,7 +135,9 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { func (az *Cloud) DeleteRouteWithRetry(routeName string) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + resp := <-respChan + err := <-errChan return processRetryResponse(resp, err) }) } @@ -128,8 +146,10 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { func (az *Cloud) CreateOrUpdateVMWithRetry(vmName string, newVM compute.VirtualMachine) error { return wait.ExponentialBackoff(az.resourceRequestBackoff, func() (bool, error) { az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - return processRetryResponse(resp, err) + respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) + resp := <-respChan + err := <-errChan + return processRetryResponse(resp.Response, err) }) } diff --git 
a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blob.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blob.go deleted file mode 100644 index 47d1edd130f8..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blob.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "fmt" - "regexp" - "strings" - - azs "github.com/Azure/azure-sdk-for-go/storage" -) - -const ( - vhdContainerName = "vhds" - useHTTPS = true - blobServiceName = "blob" -) - -// create page blob -func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) { - blobClient, err := az.getBlobClient(accountName, accountKey) - if err != nil { - return "", "", err - } - size := 1024 * 1024 * 1024 * sizeGB - vhdSize := size + vhdHeaderSize /* header size */ - // Blob name in URL must end with '.vhd' extension. 
- name = name + ".vhd" - err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags) - if err != nil { - // if container doesn't exist, create one and retry PutPageBlob - detail := err.Error() - if strings.Contains(detail, errContainerNotFound) { - err = blobClient.CreateContainer(vhdContainerName, azs.ContainerAccessTypePrivate) - if err == nil { - err = blobClient.PutPageBlob(vhdContainerName, name, vhdSize, tags) - } - } - } - if err != nil { - return "", "", fmt.Errorf("failed to put page blob: %v", err) - } - - // add VHD signature to the blob - h, err := createVHDHeader(uint64(size)) - if err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) - } - if err = blobClient.PutPage(vhdContainerName, name, size, vhdSize-1, azs.PageWriteTypeUpdate, h[:vhdHeaderSize], nil); err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to update vhd header, err: %v", err) - } - - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix) - uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name) - return name, uri, nil - -} - -// delete a vhd blob -func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error { - blobClient, err := az.getBlobClient(accountName, accountKey) - if err == nil { - return blobClient.DeleteBlob(vhdContainerName, blobName, nil) - } - return err -} - -func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) { - client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS) - if err != nil { - return nil, fmt.Errorf("error creating azure client: %v", err) - } - b := client.GetBlobService() - return &b, nil -} - -// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) 
and bar.vhd (blob name) -func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) { - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix) - reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) - re := regexp.MustCompile(reStr) - res := re.FindSubmatch([]byte(uri)) - if len(res) < 3 { - return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri) - } - return string(res[1]), string(res[2]), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go new file mode 100644 index 000000000000..037c4941ef2a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -0,0 +1,808 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "net/url" + "os" + "regexp" + "sync" + + "strconv" + "strings" + "sync/atomic" + "time" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + azstorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/glog" + "github.com/rubiojr/go-vhd/vhd" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + vhdContainerName = "vhds" + useHTTPSForBlobBasedDisk = true + blobServiceName = "blob" +) + +type storageAccountState struct { + name string + saType storage.SkuName + key string + diskCount int32 + isValidating int32 + defaultContainerCreated bool +} + +//BlobDiskController : blob disk controller struct +type BlobDiskController struct { + common *controllerCommon + accounts map[string]*storageAccountState +} + +var defaultContainerName = "" +var storageAccountNamePrefix = "" +var storageAccountNameMatch = "" +var initFlag int64 + +var accountsLock = &sync.Mutex{} + +func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) { + c := BlobDiskController{common: common} + err := c.init() + + if err != nil { + return nil, err + } + + return &c, nil +} + +// CreateVolume creates a VHD blob in a given storage account, will create the given storage account if it does not exist in current resource group +func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) { + key, err := c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, begin to create a new storage account", storageAccount, c.common.resourceGroup) + + cp := storage.AccountCreateParameters{ + Sku: &storage.Sku{Name: storageAccountType}, + Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, 
+ Location: &location} + cancel := make(chan struct{}) + + _, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel) + err = <-errchan + if err != nil { + return "", "", 0, fmt.Errorf(fmt.Sprintf("Create Storage Account %s, error: %s", storageAccount, err)) + } + + key, err = c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount) + } + + glog.Errorf("no key found for storage account %s in resource group %s", storageAccount, c.common.resourceGroup) + return "", "", 0, err + } + + client, err := azstorage.NewBasicClient(storageAccount, key) + if err != nil { + return "", "", 0, err + } + blobClient := client.GetBlobService() + + container := blobClient.GetContainerReference(vhdContainerName) + _, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", 0, err + } + + diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name, vhdContainerName, int64(requestGB)) + if err != nil { + return "", "", 0, err + } + + glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + return diskName, diskURI, requestGB, err +} + +// DeleteVolume deletes a VHD blob +func (c *BlobDiskController) DeleteVolume(diskURI string) error { + glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) + accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI) + if err != nil { + return fmt.Errorf("failed to parse vhd URI %v", err) + } + key, err := c.common.cloud.getStorageAccesskey(accountName) + if err != nil { + return fmt.Errorf("no key for storage account %s, err %v", accountName, err) + } + err = c.common.cloud.deleteVhdBlob(accountName, key, blob) + if err != nil { + glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) + detail 
:= err.Error() + if strings.Contains(detail, errLeaseIDMissing) { + // disk is still being used + // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx + return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI)) + } + return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err) + } + glog.V(4).Infof("azureDisk - blob %s deleted", diskURI) + return nil + +} + +// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name) +func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) { + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix) + reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) + re := regexp.MustCompile(reStr) + res := re.FindSubmatch([]byte(diskURI)) + if len(res) < 3 { + return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI) + } + return string(res[1]), string(res[2]), nil +} + +func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) { + container := blobClient.GetContainerReference(containerName) + _, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", err + } + + size := 1024 * 1024 * 1024 * sizeGB + vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */ + // Blob name in URL must end with '.vhd' extension. 
+ vhdName = vhdName + ".vhd" + + tags := make(map[string]string) + tags["createdby"] = "k8sAzureDataDisk" + glog.V(4).Infof("azureDisk - creating page blob %name in container %s account %s", vhdName, containerName, accountName) + + blob := container.GetBlobReference(vhdName) + blob.Properties.ContentLength = vhdSize + blob.Metadata = tags + err = blob.PutPageBlob(nil) + if err != nil { + return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err) + } + + // add VHD signature to the blob + h, err := createVHDHeader(uint64(size)) + if err != nil { + blob.DeleteIfExists(nil) + return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) + } + + blobRange := azstorage.BlobRange{ + Start: uint64(size), + End: uint64(vhdSize - 1), + } + if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil { + glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", + vhdName, containerName, accountName, err.Error()) + return "", "", err + } + + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + + host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix) + uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName) + return vhdName, uri, nil +} + +// delete a vhd blob +func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error { + client, err := azstorage.NewBasicClient(accountName, accountKey) + if err != nil { + return err + } + blobSvc := client.GetBlobService() + + container := blobSvc.GetContainerReference(vhdContainerName) + blob := container.GetBlobReference(blobName) + return blob.Delete(nil) +} + +//CreateBlobDisk : create a blob disk in a node +func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) { + glog.V(4).Infof("azureDisk - 
creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone) + + var storageAccountName = "" + var err error + + if forceStandAlone { + // we have to wait until the storage account is is created + storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName) + err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false) + if err != nil { + return "", err + } + } else { + storageAccountName, err = c.findSANameForDisk(storageAccountType) + if err != nil { + return "", err + } + } + + blobClient, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return "", err + } + + _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB)) + if err != nil { + return "", err + } + + if !forceStandAlone { + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1) + } + + return diskURI, nil +} + +//DeleteBlobDisk : delete a blob disk from a node +func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error { + storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI) + if err != nil { + return err + } + + _, ok := c.accounts[storageAccountName] + if !ok { + // the storage account is specified by user + glog.V(4).Infof("azureDisk - deleting volume %s", diskURI) + return c.DeleteVolume(diskURI) + } + // if forced (as in one disk = one storage account) + // delete the account completely + if wasForced { + return c.deleteStorageAccount(storageAccountName) + } + + blobSvc, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return err + } + + glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName) + + container := blobSvc.GetContainerReference(defaultContainerName) + blob := container.GetBlobReference(vhdName) + _, err = 
blob.DeleteIfExists(nil) + + if c.accounts[storageAccountName].diskCount == -1 { + if diskCount, err := c.getDiskCount(storageAccountName); err != nil { + c.accounts[storageAccountName].diskCount = int32(diskCount) + } else { + glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) + return nil // we have failed to aquire a new count. not an error condition + } + } + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1) + return err +} + +// Init tries best effort to ensure that 2 accounts standard/premium were created +// to be used by shared blob disks. This to increase the speed pvc provisioning (in most of cases) +func (c *BlobDiskController) init() error { + if !c.shouldInit() { + return nil + } + + c.setUniqueStrings() + + // get accounts + accounts, err := c.getAllStorageAccounts() + if err != nil { + return err + } + c.accounts = accounts + + if len(c.accounts) == 0 { + counter := 1 + for counter <= storageAccountsCountInit { + + accountType := storage.PremiumLRS + if n := math.Mod(float64(counter), 2); n == 0 { + accountType = storage.StandardLRS + } + + // We don't really care if these calls failed + // at this stage, we are trying to ensure 2 accounts (Standard/Premium) + // are there ready for PVC creation + + // if we failed here, the accounts will be created in the process + // of creating PVC + + // nor do we care if they were partially created, as the entire + // account creation process is idempotent + go func(thisNext int) { + newAccountName := getAccountNameForNum(thisNext) + + glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType) + err := c.createStorageAccount(newAccountName, accountType, c.common.location, true) + // TODO return created and error from + if err != nil { + glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error()) + + } else { + 
glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName) + } + }(counter) + counter = counter + 1 + } + } + + return nil +} + +//Sets unique strings to be used as accountnames && || blob containers names +func (c *BlobDiskController) setUniqueStrings() { + uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID + hash := MakeCRC32(uniqueString) + //used to generate a unqie container name used by this cluster PVC + defaultContainerName = hash + + storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash) + // Used to filter relevant accounts (accounts used by shared PVC) + storageAccountNameMatch = storageAccountNamePrefix + // Used as a template to create new names for relevant accounts + storageAccountNamePrefix = storageAccountNamePrefix + "%s" +} +func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) { + if account, exists := c.accounts[SAName]; exists && account.key != "" { + return c.accounts[SAName].key, nil + } + listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName) + if err != nil { + return "", err + } + if listKeysResult.Keys == nil { + return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName) + } + for _, v := range *listKeysResult.Keys { + if v.Value != nil && *v.Value == "key1" { + if _, ok := c.accounts[SAName]; !ok { + glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) + return *v.Value, nil + } + } + + c.accounts[SAName].key = *v.Value + return c.accounts[SAName].key, nil + } + + return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName) +} + +func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) { + key := "" + var client azstorage.Client + var blobSvc azstorage.BlobStorageClient + var err error + if key, err = c.getStorageAccountKey(SAName); err != nil { + return 
blobSvc, err + } + + if client, err = azstorage.NewBasicClient(SAName, key); err != nil { + return blobSvc, err + } + + blobSvc = client.GetBlobService() + return blobSvc, nil +} + +func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error { + var err error + var blobSvc azstorage.BlobStorageClient + + // short circut the check via local cache + // we are forgiving the fact that account may not be in cache yet + if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + // not cached, check existance and readiness + bExist, provisionState, _ := c.getStorageAccountState(storageAccountName) + + // account does not exist + if !bExist { + return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName) + } + + // account exists but not ready yet + if provisionState != storage.Succeeded { + // we don't want many attempts to validate the account readiness + // here hence we are locking + counter := 1 + for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; { + time.Sleep(3 * time.Second) + counter = counter + 1 + // check if we passed the max sleep + if counter >= 20 { + return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName) + } + } + + // swapped + defer func() { + c.accounts[storageAccountName].isValidating = 0 + }() + + // short circut the check again. 
+ if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { + _, provisionState, err := c.getStorageAccountState(storageAccountName) + + if err != nil { + glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) + return false, err + } + + if provisionState == storage.Succeeded { + return true, nil + } + + glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName) + // leave it for next loop/sync loop + return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName) + }) + // we have failed to ensure that account is ready for us to create + // the default vhd container + if err != nil { + return err + } + } + + if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil { + return err + } + + container := blobSvc.GetContainerReference(defaultContainerName) + bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return err + } + if bCreated { + glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName) + } + + // flag so we no longer have to check on ARM + c.accounts[storageAccountName].defaultContainerCreated = true + return nil +} + +// Gets Disk counts per storage account +func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { + // if we have it in cache + if c.accounts[SAName].diskCount != -1 { + return int(c.accounts[SAName].diskCount), nil + } + + var err error + var blobSvc azstorage.BlobStorageClient + + if err = c.ensureDefaultContainer(SAName); err != nil { + return 0, err + } + + if blobSvc, err = c.getBlobSvcClient(SAName); err != nil { + return 0, err + } + params := azstorage.ListBlobsParameters{} + + container := 
blobSvc.GetContainerReference(defaultContainerName) + response, err := container.ListBlobs(params) + if err != nil { + return 0, err + } + glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) + c.accounts[SAName].diskCount = int32(len(response.Blobs)) + + return int(c.accounts[SAName].diskCount), nil +} + +// shouldInit ensures that we only init the plugin once +// and we only do that in the controller + +func (c *BlobDiskController) shouldInit() bool { + if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && os.Args[1] == "controller-manager") { + swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1) + if swapped { + return true + } + } + return false +} + +func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { + accountListResult, err := c.common.cloud.StorageAccountClient.List() + if err != nil { + return nil, err + } + if accountListResult.Value == nil { + return nil, fmt.Errorf("azureDisk - empty accountListResult") + } + + accounts := make(map[string]*storageAccountState) + for _, v := range *accountListResult.Value { + if strings.Index(*v.Name, storageAccountNameMatch) != 0 { + continue + } + if v.Name == nil || v.Sku == nil { + glog.Infof("azureDisk - accountListResult Name or Sku is nil") + continue + } + glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) + + sastate := &storageAccountState{ + name: *v.Name, + saType: (*v.Sku).Name, + diskCount: -1, + } + + accounts[*v.Name] = sastate + } + + return accounts, nil +} + +func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error { + bExist, _, _ := c.getStorageAccountState(storageAccountName) + if bExist { + newAccountState := &storageAccountState{ + diskCount: -1, + saType: storageAccountType, + name: storageAccountName, + } + + 
c.addAccountState(storageAccountName, newAccountState) + } + // Account Does not exist + if !bExist { + if len(c.accounts) == maxStorageAccounts && checkMaxAccounts { + return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts) + } + + glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType)) + + cp := storage.AccountCreateParameters{ + Sku: &storage.Sku{Name: storageAccountType}, + Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, + Location: &location} + cancel := make(chan struct{}) + + _, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel) + err := <-errChan + if err != nil { + return fmt.Errorf(fmt.Sprintf("Create Storage Account: %s, error: %s", storageAccountName, err)) + } + + newAccountState := &storageAccountState{ + diskCount: -1, + saType: storageAccountType, + name: storageAccountName, + } + + c.addAccountState(storageAccountName, newAccountState) + } + + if !bExist { + // SA Accounts takes time to be provisioned + // so if this account was just created allow it sometime + // before polling + glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status") + time.Sleep(25 * time.Second) // as observed 25 is the average time for SA to be provisioned + } + + // finally, make sure that we default container is created + // before handing it back over + return c.ensureDefaultContainer(storageAccountName) +} + +// finds a new suitable storageAccount for this disk +func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) { + maxDiskCount := maxDisksPerStorageAccounts + SAName := "" + totalDiskCounts := 0 + countAccounts := 0 // account of this type. 
+ for _, v := range c.accounts { + // filter out any stand-alone disks/accounts + if strings.Index(v.name, storageAccountNameMatch) != 0 { + continue + } + + // note: we compute avge stratified by type. + // this to enable user to grow per SA type to avoid low + //avg utilization on one account type skewing all data. + + if v.saType == storageAccountType { + // compute average + dCount, err := c.getDiskCount(v.name) + if err != nil { + return "", err + } + totalDiskCounts = totalDiskCounts + dCount + countAccounts = countAccounts + 1 + // empty account + if dCount == 0 { + glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) + return v.name, nil // shortcircut, avg is good and no need to adjust + } + // if this account is less allocated + if dCount < maxDiskCount { + maxDiskCount = dCount + SAName = v.name + } + } + } + + // if we failed to find storageaccount + if SAName == "" { + glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") + SAName = getAccountNameForNum(c.getNextAccountNum()) + err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) + if err != nil { + return "", err + } + return SAName, nil + } + + disksAfter := totalDiskCounts + 1 // with the new one! + + avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts) + aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing) + + // avg are not create and we should craete more accounts if we can + if aboveAvg && countAccounts < maxStorageAccounts { + glog.V(2).Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). 
New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) + SAName = getAccountNameForNum(c.getNextAccountNum()) + err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) + if err != nil { + return "", err + } + return SAName, nil + } + + // avergates are not ok and we are at capacity(max storage accounts allowed) + if aboveAvg && countAccounts == maxStorageAccounts { + glog.Infof("azureDisk - shared storageAccounts utilzation(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", + avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts) + } + + // we found a storage accounts && [ avg are ok || we reached max sa count ] + return SAName, nil +} +func (c *BlobDiskController) getNextAccountNum() int { + max := 0 + + for k := range c.accounts { + // filter out accounts that are for standalone + if strings.Index(k, storageAccountNameMatch) != 0 { + continue + } + num := getAccountNumFromName(k) + if num > max { + max = num + } + } + + return max + 1 +} + +func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error { + resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName) + if err != nil { + return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err) + } + + c.removeAccountState(storageAccountName) + + glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName) + return nil +} + +//Gets storage account exist, provisionStatus, Error if any +func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) { + account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName) + if err != nil { + return false, "", err + } + return true, 
account.AccountProperties.ProvisioningState, nil +} + +func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) { + accountsLock.Lock() + defer accountsLock.Unlock() + + if _, ok := c.accounts[key]; !ok { + c.accounts[key] = state + } +} + +func (c *BlobDiskController) removeAccountState(key string) { + accountsLock.Lock() + defer accountsLock.Unlock() + delete(c.accounts, key) +} + +// pads account num with zeros as needed +func getAccountNameForNum(num int) string { + sNum := strconv.Itoa(num) + missingZeros := 3 - len(sNum) + strZero := "" + for missingZeros > 0 { + strZero = strZero + "0" + missingZeros = missingZeros - 1 + } + + sNum = strZero + sNum + return fmt.Sprintf(storageAccountNamePrefix, sNum) +} + +func getAccountNumFromName(accountName string) int { + nameLen := len(accountName) + num, _ := strconv.Atoi(accountName[nameLen-3:]) + + return num +} + +func createVHDHeader(size uint64) ([]byte, error) { + h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{}) + b := new(bytes.Buffer) + err := binary.Write(b, binary.BigEndian, h) + if err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func diskNameandSANameFromURI(diskURI string) (string, string, error) { + uri, err := url.Parse(diskURI) + if err != nil { + return "", "", err + } + + hostName := uri.Host + storageAccountName := strings.Split(hostName, ".")[0] + + segments := strings.Split(uri.Path, "/") + diskNameVhd := segments[len(segments)-1] + + return storageAccountName, diskNameVhd, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go new file mode 100644 index 000000000000..881a7dbb2c4f --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_controllerCommon.go @@ -0,0 +1,270 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/types" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/cloudprovider" + + "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/golang/glog" +) + +const ( + defaultDataDiskCount int = 16 // which will allow you to work with most medium size VMs (if not found in map) + storageAccountNameTemplate = "pvc%s" + + // for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits + maxStorageAccounts = 100 // max # is 200 (250 with special request). 
this allows 100 for everything else including stand alone disks + maxDisksPerStorageAccounts = 60 + storageAccountUtilizationBeforeGrowing = 0.5 + storageAccountsCountInit = 2 // When the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount + + maxLUN = 64 // max number of LUNs per VM + errLeaseFailed = "AcquireDiskLeaseFailed" + errLeaseIDMissing = "LeaseIdMissing" + errContainerNotFound = "ContainerNotFound" +) + +var defaultBackOff = kwait.Backoff{ + Steps: 20, + Duration: 2 * time.Second, + Factor: 1.5, + Jitter: 0.0, +} + +type controllerCommon struct { + tenantID string + subscriptionID string + location string + storageEndpointSuffix string + resourceGroup string + clientID string + clientSecret string + managementEndpoint string + tokenEndPoint string + aadResourceEndPoint string + aadToken string + expiresOn time.Time + cloud *Cloud +} + +// AttachDisk attaches a vhd to vm +// the vhd must exist, can be identified by diskName, diskURI, and lun. 
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return err + } else if !exists { + return cloudprovider.InstanceNotFound + } + disks := *vm.StorageProfile.DataDisks + if isManagedDisk { + disks = append(disks, + compute.DataDisk{ + Name: &diskName, + Lun: &lun, + Caching: cachingMode, + CreateOption: "attach", + ManagedDisk: &compute.ManagedDiskParameters{ + ID: &diskURI, + }, + }) + } else { + disks = append(disks, + compute.DataDisk{ + Name: &diskName, + Vhd: &compute.VirtualHardDisk{ + URI: &diskURI, + }, + Lun: &lun, + Caching: cachingMode, + CreateOption: "attach", + }) + } + + newVM := compute.VirtualMachine{ + Location: vm.Location, + VirtualMachineProperties: &compute.VirtualMachineProperties{ + StorageProfile: &compute.StorageProfile{ + DataDisks: &disks, + }, + }, + } + vmName := mapNodeNameToVMName(nodeName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName) + c.cloud.operationPollRateLimiter.Accept() + respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) + resp := <-respChan + err = <-errChan + if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName) + retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName) + } + } + if err != nil { + glog.Errorf("azureDisk - azure attach failed, err: %v", err) + detail := err.Error() + if strings.Contains(detail, errLeaseFailed) { + // if lease cannot be acquired, immediately detach the disk and return the original error + glog.Infof("azureDisk - failed to acquire disk lease, try detach") + 
c.cloud.DetachDiskByName(diskName, diskURI, nodeName) + } + } else { + glog.V(4).Infof("azureDisk - azure attach succeeded") + } + return err +} + +// DetachDiskByName detaches a vhd from host +// the vhd can be identified by diskName or diskURI +func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil || !exists { + // if host doesn't exist, no need to detach + glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) + return nil + } + + disks := *vm.StorageProfile.DataDisks + bFoundDisk := false + for i, disk := range disks { + if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || + (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || + (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { + // found the disk + glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + disks = append(disks[:i], disks[i+1:]...) 
+ bFoundDisk = true + break + } + } + + if !bFoundDisk { + return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + } + + newVM := compute.VirtualMachine{ + Location: vm.Location, + VirtualMachineProperties: &compute.VirtualMachineProperties{ + StorageProfile: &compute.StorageProfile{ + DataDisks: &disks, + }, + }, + } + vmName := mapNodeNameToVMName(nodeName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName) + c.cloud.operationPollRateLimiter.Accept() + respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil) + resp := <-respChan + err = <-errChan + if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { + glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName) + retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.cloud.ResourceGroup, vmName) + } + } + if err != nil { + glog.Errorf("azureDisk - azure disk detach failed, err: %v", err) + } else { + glog.V(4).Infof("azureDisk - azure disk detach succeeded") + } + return err +} + +// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI +func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return -1, err + } else if !exists { + return -1, cloudprovider.InstanceNotFound + } + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || + (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || + (disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) { + // found the disk + glog.V(4).Infof("azureDisk - find disk: lun 
%d name %q uri %q", *disk.Lun, diskName, diskURI) + return *disk.Lun, nil + } + } + return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName) +} + +// GetNextDiskLun searches all vhd attachment on the host and find unused lun +// return -1 if all luns are used +func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if err != nil { + return -1, err + } else if !exists { + return -1, cloudprovider.InstanceNotFound + } + used := make([]bool, maxLUN) + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + if disk.Lun != nil { + used[*disk.Lun] = true + } + } + for k, v := range used { + if !v { + return int32(k), nil + } + } + return -1, fmt.Errorf("All Luns are used") +} + +// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName +func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { + attached := make(map[string]bool) + for _, diskName := range diskNames { + attached[diskName] = false + } + vm, exists, err := c.cloud.getVirtualMachine(nodeName) + if !exists { + // if host doesn't exist, no need to detach + glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", + nodeName, diskNames) + return attached, nil + } else if err != nil { + return attached, err + } + + disks := *vm.StorageProfile.DataDisks + for _, disk := range disks { + for _, diskName := range diskNames { + if disk.Name != nil && diskName != "" && *disk.Name == diskName { + attached[diskName] = true + } + } + } + + return attached, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go index ccdca622a46a..482911283243 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go +++ 
b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_file.go @@ -18,9 +18,13 @@ package azure import ( "fmt" - "strconv" azs "github.com/Azure/azure-sdk-for-go/storage" + "github.com/golang/glog" +) + +const ( + useHTTPS = true ) // create file share @@ -34,11 +38,15 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in // setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare // receives error "The metadata specified is invalid. It has characters that are not permitted." // As a result,breaking into two API calls: create share and set quota - if err = fileClient.CreateShare(name, nil); err != nil { + share := fileClient.GetShareReference(name) + if err = share.Create(nil); err != nil { return fmt.Errorf("failed to create file share, err: %v", err) } - if err = fileClient.SetShareProperties(name, azs.ShareHeaders{Quota: strconv.Itoa(sizeGB)}); err != nil { - az.deleteFileShare(accountName, accountKey, name) + share.Properties.Quota = sizeGB + if err = share.SetProperties(nil); err != nil { + if err := share.Delete(nil); err != nil { + glog.Errorf("Error deleting share: %v", err) + } return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) } return nil @@ -48,9 +56,10 @@ func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB in func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error { fileClient, err := az.getFileSvcClient(accountName, accountKey) if err == nil { - return fileClient.DeleteShare(name) + share := fileClient.GetShareReference(name) + return share.Delete(nil) } - return err + return nil } func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 741280f5821e..d334735cb74e 100644 
--- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -143,8 +143,10 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil sg.SecurityGroupPropertiesFormat.Subnets = nil az.operationPollRateLimiter.Accept() - resp, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: sg(%s) - updating", serviceName, *sg.Name) retryErr := az.CreateOrUpdateSGWithRetry(sg) if retryErr != nil { @@ -222,8 +224,10 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *v1.Service, nod if !existsLb || lbNeedsUpdate { glog.V(3).Infof("ensure(%s): lb(%s) - updating", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: lb(%s) - updating", serviceName, lbName) retryErr := az.CreateOrUpdateLBWithRetry(lb) if retryErr != nil { @@ -315,8 +319,10 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *v1.Servi sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil sg.SecurityGroupPropertiesFormat.Subnets = nil az.operationPollRateLimiter.Accept() - resp, err := 
az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) retryErr := az.CreateOrUpdateSGWithRetry(reconciledSg) if retryErr != nil { @@ -369,8 +375,10 @@ func (az *Cloud) cleanupLoadBalancer(clusterName string, service *v1.Service, is if len(*lb.FrontendIPConfigurations) > 0 { glog.V(3).Infof("delete(%s): lb(%s) - updating", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.LoadBalancerClient.CreateOrUpdate(az.ResourceGroup, *lb.Name, lb, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("delete(%s) backing off: sg(%s) - updating", serviceName, az.SecurityGroupName) retryErr := az.CreateOrUpdateLBWithRetry(lb) if retryErr != nil { @@ -385,7 +393,9 @@ func (az *Cloud) cleanupLoadBalancer(clusterName string, service *v1.Service, is glog.V(3).Infof("delete(%s): lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) az.operationPollRateLimiter.Accept() - resp, err := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + respChan, errChan := az.LoadBalancerClient.Delete(az.ResourceGroup, lbName, nil) + resp := <-respChan + err := <-errChan if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { glog.V(2).Infof("delete(%s) backing off: lb(%s) - deleting; no remaining frontendipconfigs", serviceName, lbName) retryErr := 
az.DeleteLBWithRetry(lbName) @@ -440,8 +450,10 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub glog.V(3).Infof("ensure(%s): pip(%s) - creating", serviceName, *pip.Name) az.operationPollRateLimiter.Accept() - resp, err := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.PublicIPAddressesClient.CreateOrUpdate(az.ResourceGroup, *pip.Name, pip, nil) + resp := <-respChan + err = <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("ensure(%s) backing off: pip(%s) - creating", serviceName, *pip.Name) retryErr := az.CreateOrUpdatePIPWithRetry(pip) if retryErr != nil { @@ -466,8 +478,9 @@ func (az *Cloud) ensurePublicIPExists(serviceName, pipName string) (*network.Pub func (az *Cloud) ensurePublicIPDeleted(serviceName, pipName string) error { glog.V(2).Infof("ensure(%s): pip(%s) - deleting", serviceName, pipName) az.operationPollRateLimiter.Accept() - resp, deleteErr := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, deleteErr) { + resp, deleteErrChan := az.PublicIPAddressesClient.Delete(az.ResourceGroup, pipName, nil) + deleteErr := <-deleteErrChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(<-resp, deleteErr) { glog.V(2).Infof("ensure(%s) backing off: pip(%s) - deleting", serviceName, pipName) retryErr := az.DeletePublicIPWithRetry(pipName) if retryErr != nil { @@ -772,8 +785,8 @@ func (az *Cloud) reconcileSecurityGroup(sg network.SecurityGroup, clusterName st DestinationPortRange: to.StringPtr(strconv.Itoa(int(port.Port))), SourceAddressPrefix: to.StringPtr(sourceAddressPrefixes[j]), DestinationAddressPrefix: to.StringPtr("*"), - Access: network.Allow, - Direction: network.Inbound, + Access: network.SecurityRuleAccessAllow, + Direction: network.SecurityRuleDirectionInbound, }, 
} } @@ -918,8 +931,10 @@ func (az *Cloud) ensureHostInPool(serviceName string, nodeName types.NodeName, b glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) az.operationPollRateLimiter.Accept() - resp, err := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.InterfacesClient.CreateOrUpdate(az.ResourceGroup, *nic.Name, nic, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) retryErr := az.CreateOrUpdateInterfaceWithRetry(nic) if retryErr != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go new file mode 100644 index 000000000000..5acdf5835836 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "path" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/disk" + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/golang/glog" + kwait "k8s.io/apimachinery/pkg/util/wait" +) + +//ManagedDiskController : managed disk controller struct +type ManagedDiskController struct { + common *controllerCommon +} + +func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) { + return &ManagedDiskController{common: common}, nil +} + +//CreateManagedDisk : create managed disk +func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) { + glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB) + + newTags := make(map[string]*string) + azureDDTag := "kubernetes-azure-dd" + newTags["created-by"] = &azureDDTag + + // insert original tags to newTags + if tags != nil { + for k, v := range tags { + // Azure won't allow / (forward slash) in tags + newKey := strings.Replace(k, "/", "-", -1) + newValue := strings.Replace(v, "/", "-", -1) + newTags[newKey] = &newValue + } + } + + diskSizeGB := int32(sizeGB) + model := disk.Model{ + Location: &c.common.location, + Tags: &newTags, + Properties: &disk.Properties{ + AccountType: disk.StorageAccountTypes(storageAccountType), + DiskSizeGB: &diskSizeGB, + CreationData: &disk.CreationData{CreateOption: disk.Empty}, + }} + cancel := make(chan struct{}) + respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel) + <-respChan + err := <-errChan + if err != nil { + return "", err + } + + diskID := "" + + err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { + provisonState, id, err := c.getDisk(diskName) + diskID = id + // We are waiting for provisioningState==Succeeded + // We don't want to hand-off managed disks to k8s while 
they are + //still being provisioned, this is to avoid some race conditions + if err != nil { + return false, err + } + if strings.ToLower(provisonState) == "succeeded" { + return true, nil + } + return false, nil + }) + + if err != nil { + glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB) + } else { + glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB) + } + + return diskID, nil +} + +//DeleteManagedDisk : delete managed disk +func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { + diskName := path.Base(diskURI) + cancel := make(chan struct{}) + respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel) + <-respChan + err := <-errChan + if err != nil { + return err + } + // We don't need poll here, k8s will immediatly stop referencing the disk + // the disk will be evantually deleted - cleanly - by ARM + + glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) + + return nil +} + +// return: disk provisionState, diskID, error +func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) { + result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName) + if err != nil { + return "", "", err + } + + if result.Properties != nil && (*result.Properties).ProvisioningState != nil { + return *(*result.Properties).ProvisioningState, *result.ID, nil + } + + return "", "", err +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go index 0d7a23ebfd82..b7cb4ae810f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_routes.go @@ -78,8 +78,10 @@ func (az 
*Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo glog.V(3).Infof("create: creating routetable. routeTableName=%q", az.RouteTableName) az.operationPollRateLimiter.Accept() - resp, err := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.RouteTablesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, routeTable, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("create backing off: creating routetable. routeTableName=%q", az.RouteTableName) retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable) if retryErr != nil { @@ -114,8 +116,10 @@ func (az *Cloud) CreateRoute(clusterName string, nameHint string, kubeRoute *clo glog.V(3).Infof("create: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { + respChan, errChan := az.RoutesClient.CreateOrUpdate(az.ResourceGroup, az.RouteTableName, *route.Name, route, nil) + resp := <-respChan + err = <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { glog.V(2).Infof("create backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.CreateOrUpdateRouteWithRetry(route) if retryErr != nil { @@ -138,7 +142,10 @@ func (az *Cloud) DeleteRoute(clusterName string, kubeRoute *cloudprovider.Route) routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) az.operationPollRateLimiter.Accept() - resp, err := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, routeName, nil) + respChan, errChan := az.RoutesClient.Delete(az.ResourceGroup, az.RouteTableName, 
routeName, nil) + resp := <-respChan + err := <-errChan + if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { glog.V(2).Infof("delete backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.DeleteRouteWithRetry(routeName) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go index b810480ab478..8572b9c779de 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_storage.go @@ -18,260 +18,10 @@ package azure import ( "fmt" - "strings" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/volume" ) -const ( - maxLUN = 64 // max number of LUNs per VM - errLeaseFailed = "AcquireDiskLeaseFailed" - errLeaseIDMissing = "LeaseIdMissing" - errContainerNotFound = "ContainerNotFound" -) - -// AttachDisk attaches a vhd to vm -// the vhd must exist, can be identified by diskName, diskURI, and lun. 
-func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return err - } else if !exists { - return cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - disks = append(disks, - compute.DataDisk{ - Name: &diskName, - Vhd: &compute.VirtualHardDisk{ - URI: &diskURI, - }, - Lun: &lun, - Caching: cachingMode, - CreateOption: "attach", - }) - - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure attach failed, err: %v", err) - detail := err.Error() - if strings.Contains(detail, errLeaseFailed) { - // if lease cannot be acquired, immediately detach the disk and return the original error - glog.Infof("failed to acquire disk lease, try detach") - az.DetachDiskByName(diskName, diskURI, nodeName) - } - } else { - glog.V(4).Infof("azure attach succeeded") - } - return err -} - -// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName -func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { - attached := make(map[string]bool) - for _, diskName := range diskNames { - 
attached[diskName] = false - } - vm, exists, err := az.getVirtualMachine(nodeName) - if !exists { - // if host doesn't exist, no need to detach - glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", - nodeName, diskNames) - return attached, nil - } else if err != nil { - return attached, err - } - - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - for _, diskName := range diskNames { - if disk.Name != nil && diskName != "" && *disk.Name == diskName { - attached[diskName] = true - } - } - } - - return attached, nil -} - -// DetachDiskByName detaches a vhd from host -// the vhd can be identified by diskName or diskURI -func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil || !exists { - // if host doesn't exist, no need to detach - glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName) - return nil - } - - disks := *vm.StorageProfile.DataDisks - for i, disk := range disks { - if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI) - disks = append(disks[:i], disks[i+1:]...) 
- break - } - } - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - resp, err := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure disk detach failed, err: %v", err) - } else { - glog.V(4).Infof("azure disk detach succeeded") - } - return err -} - -// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI -func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) - return *disk.Lun, nil - } - } - return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName) -} - -// GetNextDiskLun searches all vhd attachment on the host and find unused lun -// return -1 if all luns are used -func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - 
return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - used := make([]bool, maxLUN) - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil { - used[*disk.Lun] = true - } - } - for k, v := range used { - if !v { - return int32(k), nil - } - } - return -1, fmt.Errorf("All Luns are used") -} - -// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account. -// If no storage account is given, search all the storage accounts associated with the resource group and pick one that -// fits storage type and location. -func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - var err error - accounts := []accountWithLocation{} - if len(storageAccount) > 0 { - accounts = append(accounts, accountWithLocation{Name: storageAccount}) - } else { - // find a storage account - accounts, err = az.getStorageAccounts() - if err != nil { - // TODO: create a storage account and container - return "", "", 0, err - } - } - for _, account := range accounts { - glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location) - if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 { - // find the access key with this account - key, err := az.getStorageAccesskey(account.Name) - if err != nil { - glog.V(2).Infof("no key found for storage account %s", account.Name) - continue - } - - // create a page blob in this account's vhd container - name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil) - if err != nil { - glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err) - continue - } - glog.V(4).Infof("created vhd blob uri: %s", uri) - return name, uri, requestGB, err - } - } - return "", "", 0, fmt.Errorf("failed to find a 
matching storage account") -} - -// DeleteVolume deletes a VHD blob -func (az *Cloud) DeleteVolume(name, uri string) error { - accountName, blob, err := az.getBlobNameAndAccountFromURI(uri) - if err != nil { - return fmt.Errorf("failed to parse vhd URI %v", err) - } - key, err := az.getStorageAccesskey(accountName) - if err != nil { - return fmt.Errorf("no key for storage account %s, err %v", accountName, err) - } - err = az.deleteVhdBlob(accountName, key, blob) - if err != nil { - glog.Warningf("failed to delete blob %s err: %v", uri, err) - detail := err.Error() - if strings.Contains(detail, errLeaseIDMissing) { - // disk is still being used - // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx - return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name)) - } - return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err) - } - glog.V(4).Infof("blob %s deleted", uri) - return nil - -} - // CreateFileShare creates a file share, using a matching storage account func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) { var err error diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go index 6bc61bf78606..a7211e917596 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_test.go @@ -556,7 +556,7 @@ func TestProtocolTranslationTCP(t *testing.T) { if *transportProto != network.TransportProtocolTCP { t.Errorf("Expected TCP LoadBalancer Rule Protocol. Got %v", transportProto) } - if *securityGroupProto != network.TCP { + if *securityGroupProto != network.SecurityRuleProtocolTCP { t.Errorf("Expected TCP SecurityGroup Protocol. 
Got %v", transportProto) } if *probeProto != network.ProbeProtocolTCP { @@ -570,7 +570,7 @@ func TestProtocolTranslationUDP(t *testing.T) { if *transportProto != network.TransportProtocolUDP { t.Errorf("Expected UDP LoadBalancer Rule Protocol. Got %v", transportProto) } - if *securityGroupProto != network.UDP { + if *securityGroupProto != network.SecurityRuleProtocolUDP { t.Errorf("Expected UDP SecurityGroup Protocol. Got %v", transportProto) } if probeProto != nil { @@ -585,6 +585,8 @@ func TestNewCloudFromJSON(t *testing.T) { "subscriptionId": "--subscription-id--", "aadClientId": "--aad-client-id--", "aadClientSecret": "--aad-client-secret--", + "aadClientCertPath": "--aad-client-cert-path--", + "aadClientCertPassword": "--aad-client-cert-password--", "resourceGroup": "--resource-group--", "location": "--location--", "subnetName": "--subnet-name--", @@ -606,15 +608,20 @@ func TestNewCloudFromJSON(t *testing.T) { // Test Backoff and Rate Limit defaults (json) func TestCloudDefaultConfigFromJSON(t *testing.T) { - config := `{}` + config := `{ + "aadClientId": "--aad-client-id--", + "aadClientSecret": "--aad-client-secret--" + }` validateEmptyConfig(t, config) } // Test Backoff and Rate Limit defaults (yaml) func TestCloudDefaultConfigFromYAML(t *testing.T) { - config := `` - + config := ` +aadClientId: --aad-client-id-- +aadClientSecret: --aad-client-secret-- +` validateEmptyConfig(t, config) } @@ -625,6 +632,8 @@ tenantId: --tenant-id-- subscriptionId: --subscription-id-- aadClientId: --aad-client-id-- aadClientSecret: --aad-client-secret-- +aadClientCertPath: --aad-client-cert-path-- +aadClientCertPassword: --aad-client-cert-password-- resourceGroup: --resource-group-- location: --location-- subnetName: --subnet-name-- @@ -659,6 +668,12 @@ func validateConfig(t *testing.T, config string) { if azureCloud.AADClientSecret != "--aad-client-secret--" { t.Errorf("got incorrect value for AADClientSecret") } + if azureCloud.AADClientCertPath != 
"--aad-client-cert-path--" { + t.Errorf("got incorrect value for AADClientCertPath") + } + if azureCloud.AADClientCertPassword != "--aad-client-cert-password--" { + t.Errorf("got incorrect value for AADClientCertPassword") + } if azureCloud.ResourceGroup != "--resource-group--" { t.Errorf("got incorrect value for ResourceGroup") } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go index 9f092dcc5642..bbea8ed2bafa 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_util.go @@ -19,7 +19,9 @@ package azure import ( "errors" "fmt" + "hash/crc32" "regexp" + "strconv" "strings" "k8s.io/kubernetes/pkg/api/v1" @@ -135,12 +137,12 @@ func getProtocolsFromKubernetesProtocol(protocol v1.Protocol) (*network.Transpor switch protocol { case v1.ProtocolTCP: transportProto = network.TransportProtocolTCP - securityProto = network.TCP + securityProto = network.SecurityRuleProtocolTCP probeProto = network.ProbeProtocolTCP return &transportProto, &securityProto, &probeProto, nil case v1.ProtocolUDP: transportProto = network.TransportProtocolUDP - securityProto = network.UDP + securityProto = network.SecurityRuleProtocolUDP return &transportProto, &securityProto, nil, nil default: return &transportProto, &securityProto, &probeProto, fmt.Errorf("Only TCP and UDP are supported for Azure LoadBalancers") @@ -293,3 +295,58 @@ func splitProviderID(providerID string) (types.NodeName, error) { } return types.NodeName(matches[1]), nil } + +var polyTable = crc32.MakeTable(crc32.Koopman) + +//MakeCRC32 : convert string to CRC32 format +func MakeCRC32(str string) string { + crc := crc32.New(polyTable) + crc.Write([]byte(str)) + hash := crc.Sum32() + return strconv.FormatUint(uint64(hash), 10) +} + +//ExtractVMData : extract dataDisks, storageProfile from a map struct +func 
ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{}, + storageProfile map[string]interface{}, + hardwareProfile map[string]interface{}, err error) { + props, ok := vmData["properties"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error") + } + + storageProfile, ok = props["storageProfile"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error") + } + + hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error") + } + + dataDisks, ok = storageProfile["dataDisks"].([]interface{}) + if !ok { + return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error") + } + return dataDisks, storageProfile, hardwareProfile, nil +} + +//ExtractDiskData : extract provisioningState, diskState from a map struct +func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) { + fragment, ok := diskData.(map[string]interface{}) + if !ok { + return "", "", fmt.Errorf("convert diskData to map error") + } + + properties, ok := fragment["properties"].(map[string]interface{}) + if !ok { + return "", "", fmt.Errorf("convert diskData(properties) to map error") + } + + provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, provisioningState property will be there + if ref, ok := properties["diskState"]; ok { + diskState = ref.(string) + } + return provisioningState, diskState, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go index 613e59a439de..9879655945c2 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -132,9 +132,16 @@ func (az 
*Cloud) getPublicIPAddress(name string) (pip network.PublicIPAddress, e func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet network.Subnet, exists bool, err error) { var realErr error + var rg string + + if len(az.VnetResourceGroup) > 0 { + rg = az.VnetResourceGroup + } else { + rg = az.ResourceGroup + } az.operationPollRateLimiter.Accept() - subnet, err = az.SubnetsClient.Get(az.ResourceGroup, virtualNetworkName, subnetName, "") + subnet, err = az.SubnetsClient.Get(rg, virtualNetworkName, subnetName, "") exists, realErr = checkResourceExistsFromError(err) if realErr != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/vhd.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/vhd.go deleted file mode 100644 index 93c857743b0f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/azure/vhd.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package azure - -import ( - "bytes" - "encoding/binary" - - "github.com/rubiojr/go-vhd/vhd" -) - -const ( - vhdHeaderSize = vhd.VHD_HEADER_SIZE -) - -func createVHDHeader(size uint64) ([]byte, error) { - h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{}) - b := new(bytes.Buffer) - err := binary.Write(b, binary.BigEndian, h) - if err != nil { - return nil, err - } - return b.Bytes(), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 0086678bca91..0a0bbd4d1eb7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -179,11 +179,11 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { var nodeTags []string var nodeInstancePrefix string if config != nil { - var cfg Config - if err := gcfg.ReadInto(&cfg, config); err != nil { - glog.Errorf("Couldn't read config: %v", err) + cfg, err := readConfig(config) + if err != nil { return nil, err } + glog.Infof("Using GCE provider config %+v", cfg) if cfg.Global.ProjectID != "" { projectID = cfg.Global.ProjectID @@ -216,6 +216,15 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { nodeTags, nodeInstancePrefix, tokenSource, true /* useMetadataServer */) } +func readConfig(reader io.Reader) (*Config, error) { + cfg := &Config{} + if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { + glog.Errorf("Couldn't read config: %v", err) + return nil, err + } + return cfg, nil +} + // Creates a GCECloud object using the specified parameters. // If no networkUrl is specified, loads networkName via rest call. // If no tokenSource is specified, uses oauth2.DefaultTokenSource. 
diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go index 877d744ddfa5..3592fcf3b6c3 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_addresses.go @@ -33,18 +33,13 @@ func newAddressMetricContext(request, region string) *metricContext { // Caller is allocated a random IP if they do not specify an ipAddress. If an // ipAddress is specified, it must belong to the current project, eg: an // ephemeral IP associated with a global forwarding rule. -func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) (*compute.Address, error) { +func (gce *GCECloud) ReserveGlobalAddress(addr *compute.Address) error { mc := newAddressMetricContext("reserve", "") op, err := gce.service.GlobalAddresses.Insert(gce.projectID, addr).Do() if err != nil { - return nil, mc.Observe(err) - } - - if err := gce.waitForGlobalOp(op, mc); err != nil { - return nil, err + return mc.Observe(err) } - - return gce.GetGlobalAddress(addr.Name) + return gce.waitForGlobalOp(op, mc) } // DeleteGlobalAddress deletes a global address by name. 
@@ -65,17 +60,13 @@ func (gce *GCECloud) GetGlobalAddress(name string) (*compute.Address, error) { } // ReserveRegionAddress creates a region address -func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) (*compute.Address, error) { +func (gce *GCECloud) ReserveRegionAddress(addr *compute.Address, region string) error { mc := newAddressMetricContext("reserve", region) op, err := gce.service.Addresses.Insert(gce.projectID, region, addr).Do() if err != nil { - return nil, mc.Observe(err) - } - if err := gce.waitForRegionOp(op, region, mc); err != nil { - return nil, err + return mc.Observe(err) } - - return gce.GetRegionAddress(addr.Name, region) + return gce.waitForRegionOp(op, region, mc) } // DeleteRegionAddress deletes a region address by name. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go index a7f6683fb62e..d27f23091760 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_annotations.go @@ -16,7 +16,11 @@ limitations under the License. package gce -import "k8s.io/kubernetes/pkg/api/v1" +import ( + "k8s.io/kubernetes/pkg/api/v1" + + "github.com/golang/glog" +) type LoadBalancerType string @@ -26,12 +30,16 @@ const ( // Currently, only "internal" is supported. ServiceAnnotationLoadBalancerType = "cloud.google.com/load-balancer-type" - LBTypeInternal LoadBalancerType = "internal" + LBTypeInternal LoadBalancerType = "Internal" + // Deprecating the lowercase spelling of Internal. + deprecatedTypeInternalLowerCase LoadBalancerType = "internal" // ServiceAnnotationInternalBackendShare is annotated on a service with "true" when users // want to share GCP Backend Services for a set of internal load balancers. // ALPHA feature - this may be removed in a future release. 
- ServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share" + ServiceAnnotationILBBackendShare = "alpha.cloud.google.com/load-balancer-backend-share" + // This annotation did not correctly specify "alpha", so both annotations will be checked. + deprecatedServiceAnnotationILBBackendShare = "cloud.google.com/load-balancer-backend-share" ) // GetLoadBalancerAnnotationType returns the type of GCP load balancer which should be assembled. @@ -48,8 +56,8 @@ func GetLoadBalancerAnnotationType(service *v1.Service) (LoadBalancerType, bool) } switch v { - case LBTypeInternal: - return v, true + case LBTypeInternal, deprecatedTypeInternalLowerCase: + return LBTypeInternal, true default: return v, false } @@ -58,8 +66,13 @@ func GetLoadBalancerAnnotationType(service *v1.Service) (LoadBalancerType, bool) // GetLoadBalancerAnnotationBackendShare returns whether this service's backend service should be // shared with other load balancers. Health checks and the healthcheck firewall will be shared regardless. 
func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool { - l, exists := service.Annotations[ServiceAnnotationILBBackendShare] - if exists && l == "true" { + if l, exists := service.Annotations[ServiceAnnotationILBBackendShare]; exists && l == "true" { + return true + } + + // Check for deprecated annotation key + if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" { + glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) return true } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 4f5f90c6a1d1..bc5ca0b88fb0 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -37,8 +37,8 @@ var ( ) func init() { - if v, err := utilversion.ParseGeneric("1.7.0"); err != nil { - panic(err) + if v, err := utilversion.ParseGeneric("1.7.2"); err != nil { + glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) } else { minNodesHealthCheckVersion = v } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go index 11be67954302..d421ba5c6399 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_healthchecks_test.go @@ -27,8 +27,9 @@ func TestIsAtLeastMinNodesHealthCheckVersion(t *testing.T) { version string expect bool }{ - {"v1.7.1", true}, - {"v1.7.0-alpha.2.597+276d289b90d322", true}, + {"v1.7.3", true}, + {"v1.7.2", true}, + {"v1.7.2-alpha.2.597+276d289b90d322", true}, {"v1.6.0-beta.3.472+831q821c907t31a", false}, 
{"v1.5.2", false}, } @@ -52,14 +53,14 @@ func TestSupportsNodesHealthCheck(t *testing.T) { { Status: v1.NodeStatus{ NodeInfo: v1.NodeSystemInfo{ - KubeProxyVersion: "v1.7.1", + KubeProxyVersion: "v1.7.2", }, }, }, { Status: v1.NodeStatus{ NodeInfo: v1.NodeSystemInfo{ - KubeProxyVersion: "v1.7.0-alpha.2.597+276d289b90d322", + KubeProxyVersion: "v1.7.2-alpha.2.597+276d289b90d322", }, }, }, @@ -92,14 +93,14 @@ func TestSupportsNodesHealthCheck(t *testing.T) { { Status: v1.NodeStatus{ NodeInfo: v1.NodeSystemInfo{ - KubeProxyVersion: "v1.7.1", + KubeProxyVersion: "v1.7.3", }, }, }, { Status: v1.NodeStatus{ NodeInfo: v1.NodeSystemInfo{ - KubeProxyVersion: "v1.7.0-alpha.2.597+276d289b90d322", + KubeProxyVersion: "v1.7.2-alpha.2.597+276d289b90d322", }, }, }, diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 74aefee65ae9..5a6ddd561545 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -807,7 +807,7 @@ func (gce *GCECloud) ensureHttpHealthCheckFirewall(serviceName, ipAddress, regio if fw.Description != desc || len(fw.Allowed) != 1 || fw.Allowed[0].IPProtocol != string(ports[0].Protocol) || - !equalStringSets(fw.Allowed[0].Ports, []string{string(ports[0].Port)}) || + !equalStringSets(fw.Allowed[0].Ports, []string{strconv.Itoa(int(ports[0].Port))}) || !equalStringSets(fw.SourceRanges, sourceRanges.StringSlice()) { glog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) if err := gce.updateFirewall(fwName, region, desc, sourceRanges, ports, hosts); err != nil { @@ -939,8 +939,7 @@ func (gce *GCECloud) ensureStaticIP(name, serviceName, region, existingIP string addressObj.Address = existingIP } - address, err := gce.ReserveRegionAddress(addressObj, region) - if err != 
nil { + if err = gce.ReserveRegionAddress(addressObj, region); err != nil { if !isHTTPErrorCode(err, http.StatusConflict) { return "", false, fmt.Errorf("error creating gce static IP address: %v", err) } @@ -948,5 +947,10 @@ func (gce *GCECloud) ensureStaticIP(name, serviceName, region, existingIP string existed = true } - return address.Address, existed, nil + addr, err := gce.GetRegionAddress(name, region) + if err != nil { + return "", false, fmt.Errorf("error getting static IP address: %v", err) + } + + return addr.Address, existed, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go index c04473f9380c..0eff53dbdf78 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce_test.go @@ -18,9 +18,26 @@ package gce import ( "reflect" + "strings" "testing" ) +func TestExtraKeyInConfig(t *testing.T) { + const s = `[Global] +project-id = my-project +unknown-key = abc +network-name = my-network + ` + reader := strings.NewReader(s) + config, err := readConfig(reader) + if err != nil { + t.Fatalf("Unexpected config parsing error %v", err) + } + if config.Global.ProjectID != "my-project" || config.Global.NetworkName != "my-network" { + t.Fatalf("Expected config values to continue to be read despite extra key-value pair.") + } +} + func TestGetRegion(t *testing.T) { zoneName := "us-central1-b" regionName, err := GetGCERegion(zoneName) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go index 7f7b499bb774..f9a46140c819 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -230,11 +230,9 @@ func (os *OpenStack) 
AttachDisk(instanceID, volumeID string) (string, error) { glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil } - glog.V(2).Infof("Disk %s is attached to a different instance (%s), detaching", volumeID, volume.AttachedServerId) - err = os.DetachDisk(volume.AttachedServerId, volumeID) - if err != nil { - return "", err - } + errmsg := fmt.Sprintf("Disk %s is attached to a different instance (%s)", volumeID, volume.AttachedServerId) + glog.V(2).Infof(errmsg) + return "", errors.New(errmsg) } startTime := time.Now() diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go index 35d60888d2f1..695d67cbcae7 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -19,6 +19,7 @@ package vsphere import ( "errors" "fmt" + "hash/fnv" "io" "io/ioutil" "net" @@ -332,8 +333,8 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) { if cfg.Global.RoundTripperCount == 0 { cfg.Global.RoundTripperCount = RoundTripperDefaultCount } - if cfg.Global.VCenterPort != "" { - glog.Warningf("port is a deprecated field in vsphere.conf and will be removed in future release.") + if cfg.Global.VCenterPort == "" { + cfg.Global.VCenterPort = "443" } var c *govmomi.Client @@ -382,7 +383,7 @@ func logout(vs *VSphere) { func newClient(ctx context.Context, cfg *VSphereConfig) (*govmomi.Client, error) { // Parse URL from string - u, err := url.Parse(fmt.Sprintf("https://%s/sdk", cfg.Global.VCenterIP)) + u, err := url.Parse(fmt.Sprintf("https://%s:%s/sdk", cfg.Global.VCenterIP, cfg.Global.VCenterPort)) if err != nil { return nil, err } @@ -806,7 +807,7 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeNam } scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) - scsiController 
:= getAvailableSCSIController(scsiControllersOfRequiredType) + scsiController = getAvailableSCSIController(scsiControllersOfRequiredType) if scsiController == nil { glog.Errorf("cannot find SCSI controller in VM") // attempt clean up of scsi controller @@ -1401,7 +1402,9 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string // Check if the VM exists in kubernetes cluster folder. // The kubernetes cluster folder - vs.cfg.Global.WorkingDir is where all the nodes in the kubernetes cluster are created. - dummyVMFullName := DummyVMPrefixName + "-" + volumeOptions.Name + fnvHash := fnv.New32a() + fnvHash.Write([]byte(volumeOptions.Name)) + dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName dummyVM, err := f.VirtualMachine(ctx, vmRegex) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go index 7ab128ecc9dd..4dd44a6e7b28 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_test.go @@ -33,6 +33,7 @@ func configFromEnv() (cfg VSphereConfig, ok bool) { var InsecureFlag bool var err error cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER") + cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT") cfg.Global.User = os.Getenv("VSPHERE_USER") cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD") cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER") diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 75ed69880976..a5bd63b15c54 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -61,6 +61,7 
@@ func GetVSphere() (*VSphere, error) { func getVSphereConfig() *VSphereConfig { var cfg VSphereConfig cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER") + cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT") cfg.Global.User = os.Getenv("VSPHERE_USER") cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD") cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER") diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go index 26832c3d36a0..c844ae9bbb24 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove.go @@ -90,10 +90,15 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error { return fmt.Errorf("unable to parse csr %q: %v", csr.Name, err) } + tried := []string{} + for _, r := range a.recognizers { if !r.recognize(csr, x509cr) { continue } + + tried = append(tried, r.permission.Subresource) + approved, err := a.authorize(csr, r.permission) if err != nil { return err @@ -107,6 +112,11 @@ func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error { return nil } } + + if len(tried) != 0 { + return fmt.Errorf("recognized csr %q as %v but subject access review was not approved", csr.Name, tried) + } + return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove_test.go b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove_test.go index b5842d8dc662..0088a423c8a6 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/sarapprove_test.go @@ -88,6 +88,7 @@ func TestHandle(t *testing.T) { message string allowed bool recognized bool + err bool verify func(*testing.T, []testclient.Action) }{ { @@ -118,6 +119,7 @@ func TestHandle(t *testing.T) { } _ = 
as[0].(testclient.CreateActionImpl) }, + err: true, }, { recognized: true, @@ -154,7 +156,7 @@ func TestHandle(t *testing.T) { } for _, c := range cases { - t.Run(fmt.Sprintf("recognized:%v,allowed: %v", c.recognized, c.allowed), func(t *testing.T) { + t.Run(fmt.Sprintf("recognized:%v,allowed: %v,err: %v", c.recognized, c.allowed, c.err), func(t *testing.T) { client := &fake.Clientset{} client.AddReactor("create", "subjectaccessreviews", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { return true, &authorization.SubjectAccessReview{ @@ -176,7 +178,7 @@ func TestHandle(t *testing.T) { }, } csr := makeTestCsr() - if err := approver.handle(csr); err != nil { + if err := approver.handle(csr); err != nil && !c.err { t.Errorf("unexpected err: %v", err) } c.verify(t, client.Actions()) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go index deefb9356e36..07dcb84eb1c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go @@ -97,8 +97,8 @@ func NewCronJobControllerFromClient(kubeClient clientset.Interface) *CronJobCont func (jm *CronJobController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() glog.Infof("Starting CronJob Manager") - // Check things every 30 second. - go wait.Until(jm.syncAll, 30*time.Second, stopCh) + // Check things every 10 second. 
+ go wait.Until(jm.syncAll, 10*time.Second, stopCh) <-stopCh glog.Infof("Shutting down CronJob Manager") } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go index 185a91676614..e2135d34a0e9 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go @@ -748,7 +748,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e var daemonPodsRunning []*v1.Pod for _, pod := range daemonPods { if pod.Status.Phase == v1.PodFailed { - msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name) + msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name) glog.V(2).Infof(msg) // Emit an event so that it's discoverable to users. dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg) @@ -1046,30 +1046,6 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { return dsc.updateDaemonSetStatus(ds, hash) } -// hasIntentionalPredicatesReasons checks if any of the given predicate failure reasons -// is intentional. -func hasIntentionalPredicatesReasons(reasons []algorithm.PredicateFailureReason) bool { - for _, r := range reasons { - switch reason := r.(type) { - case *predicates.PredicateFailureError: - switch reason { - // intentional - case - predicates.ErrNodeSelectorNotMatch, - predicates.ErrPodNotMatchHostName, - predicates.ErrNodeLabelPresenceViolated, - // this one is probably intentional since it's a workaround for not having - // pod hard anti affinity. 
- predicates.ErrPodNotFitsHostPorts, - // DaemonSet is expected to respect taints and tolerations - predicates.ErrTaintsTolerationsNotMatch: - return true - } - } - } - return false -} - // nodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a // summary. Returned booleans are: // * wantToRun: @@ -1088,7 +1064,14 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten // Because these bools require an && of all their required conditions, we start // with all bools set to true and set a bool to false if a condition is not met. - // A bool should probably not be set to true after this line. + // A bool should probably not be set to true after this line. We can + // return early if we are: + // + // 1. return false, false, false, err + // 2. return false, false, false, nil + // + // Otherwise if a condition is not met, we should set one of these + // bools to false. wantToRun, shouldSchedule, shouldContinueRunning = true, true, true // If the daemon set specifies a node name, check that it matches with node.Name. if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) { @@ -1159,22 +1142,37 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten return false, false, false, err } - // Return directly if there is any intentional predicate failure reason, so that daemonset controller skips - // checking other predicate failures, such as InsufficientResourceError and unintentional errors. 
- if hasIntentionalPredicatesReasons(reasons) { - return false, false, false, nil - } + var insufficientResourceErr error for _, r := range reasons { glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) switch reason := r.(type) { case *predicates.InsufficientResourceError: - dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error()) - shouldSchedule = false + insufficientResourceErr = reason case *predicates.PredicateFailureError: var emitEvent bool + // we try to partition predicates into two partitions here: intentional on the part of the operator and not. switch reason { - // unintentional predicates reasons need to be fired out to event. + // intentional + case + predicates.ErrNodeSelectorNotMatch, + predicates.ErrPodNotMatchHostName, + predicates.ErrNodeLabelPresenceViolated, + // this one is probably intentional since it's a workaround for not having + // pod hard anti affinity. 
+ predicates.ErrPodNotFitsHostPorts: + return false, false, false, nil + case predicates.ErrTaintsTolerationsNotMatch: + // DaemonSet is expected to respect taints and tolerations + fitsNoExecute, _, err := predicates.PodToleratesNodeNoExecuteTaints(newPod, nil, nodeInfo) + if err != nil { + return false, false, false, err + } + if !fitsNoExecute { + return false, false, false, nil + } + wantToRun, shouldSchedule = false, false + // unintentional case predicates.ErrDiskConflict, predicates.ErrVolumeZoneConflict, @@ -1202,6 +1200,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten } } } + // only emit this event if insufficient resource is the only thing + // preventing the daemon pod from scheduling + if shouldSchedule && insufficientResourceErr != nil { + dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, insufficientResourceErr.Error()) + shouldSchedule = false + } return } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go index bb0069686141..0b3a5a7c3ea4 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller_test.go @@ -60,6 +60,7 @@ var ( var ( noScheduleTolerations = []v1.Toleration{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}} noScheduleTaints = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}} + noExecuteTaints = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoExecute"}} ) var ( @@ -1101,10 +1102,46 @@ func TestDaemonKillFailedPods(t *testing.T) { } } +// Daemonset should not remove a running pod from a node if the pod doesn't +// tolerate the nodes NoSchedule taint +func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := 
newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _ := newTestController(ds) + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noScheduleTaints) + manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) + } +} + +// Daemonset should remove a running pod from a node if the pod doesn't +// tolerate the nodes NoExecute taint +func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _ := newTestController(ds) + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noExecuteTaints) + manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1) + } +} + // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint. 
-func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) { +func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) { for _, strategy := range updateStrategies() { - ds := newDaemonSet("untolerate") + ds := newDaemonSet("intolerant") ds.Spec.UpdateStrategy = *strategy manager, podControl, _ := newTestController(ds) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go index 6182d175c2de..9651d75ec279 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go @@ -450,14 +450,14 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho case desiredReplicas > scaleUpLimit: setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate") desiredReplicas = scaleUpLimit - case desiredReplicas == 0: - // never scale down to 0, reserved for disabling autoscaling - setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero") - desiredReplicas = 1 case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas: // make sure we aren't below our minimum setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was less than the minimum replica count") desiredReplicas = *hpa.Spec.MinReplicas + case desiredReplicas == 0: + // never scale down to 0, reserved for disabling autoscaling + setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero") + desiredReplicas = 1 case desiredReplicas > hpa.Spec.MaxReplicas: // make sure we aren't above our maximum setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was 
more than the maximum replica count") diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go index dfe9d756b730..11bf6ad566da 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go @@ -994,6 +994,25 @@ func TestMinReplicas(t *testing.T) { tc.runTest(t) } +func TestMinReplicasDesiredZero(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 2, + CPUTarget: 90, + reportedLevels: []uint64{0, 0, 0}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, + useMetricsApi: true, + expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{ + Type: autoscalingv2.ScalingLimited, + Status: v1.ConditionTrue, + Reason: "TooFewReplicas", + }), + } + tc.runTest(t) +} + func TestZeroReplicas(t *testing.T) { tc := testCase{ minReplicas: 3, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control_test.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control_test.go index 0e376785b429..01c1cc3d32f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_pod_control_test.go @@ -391,34 +391,6 @@ func TestStatefulPodControlUpdatePodConflictSuccess(t *testing.T) { } } -func TestStatefulPodControlUpdatePodConflictFailure(t *testing.T) { - recorder := record.NewFakeRecorder(10) - set := newStatefulSet(3) - pod := newStatefulSetPod(set, 0) - fakeClient := &fake.Clientset{} - indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - updatedPod := newStatefulSetPod(set, 0) - updatedPod.Spec.Hostname = 
"wrong" - indexer.Add(updatedPod) - podLister := corelisters.NewPodLister(indexer) - control := NewRealStatefulPodControl(fakeClient, nil, podLister, nil, recorder) - fakeClient.AddReactor("update", "pods", func(action core.Action) (bool, runtime.Object, error) { - update := action.(core.UpdateAction) - return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), pod.Name, errors.New("conflict")) - - }) - pod.Name = "goo-0" - if err := control.UpdateStatefulPod(set, pod); err == nil { - t.Error("Failed update did not return an error") - } - events := collectEvents(recorder.Events) - if eventCount := len(events); eventCount != 1 { - t.Errorf("Expected 1 event for failed Pod update found %d", eventCount) - } else if !strings.Contains(events[0], v1.EventTypeWarning) { - t.Errorf("Expected normal event found %s", events[0]) - } -} - func TestStatefulPodControlDeletesStatefulPod(t *testing.T) { recorder := record.NewFakeRecorder(10) set := newStatefulSet(3) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go index c55a1e0455fc..7d240c6cd3e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils.go @@ -116,9 +116,7 @@ func identityMatches(set *apps.StatefulSet, pod *v1.Pod) bool { return ordinal >= 0 && set.Name == parent && pod.Name == getPodName(set, ordinal) && - pod.Namespace == set.Namespace && - pod.Spec.Hostname == pod.Name && - pod.Spec.Subdomain == set.Spec.ServiceName + pod.Namespace == set.Namespace } // storageMatches returns true if pod's Volumes cover the set of PersistentVolumeClaims @@ -186,12 +184,18 @@ func updateStorage(set *apps.StatefulSet, pod *v1.Pod) { pod.Spec.Volumes = newVolumes } +func initIdentity(set *apps.StatefulSet, pod *v1.Pod) { + updateIdentity(set, pod) + // Set these immutable fields 
only on initial Pod creation, not updates. + pod.Spec.Hostname = pod.Name + pod.Spec.Subdomain = set.Spec.ServiceName +} + // updateIdentity updates pod's name, hostname, and subdomain to conform to set's name and headless service. func updateIdentity(set *apps.StatefulSet, pod *v1.Pod) { pod.Name = getPodName(set, getOrdinal(pod)) pod.Namespace = set.Namespace - pod.Spec.Hostname = pod.Name - pod.Spec.Subdomain = set.Spec.ServiceName + } // isRunningAndReady returns true if pod is in the PodRunning Phase, if it has a condition of PodReady, and if the init @@ -276,7 +280,7 @@ func getPodRevision(pod *v1.Pod) string { func newStatefulSetPod(set *apps.StatefulSet, ordinal int) *v1.Pod { pod, _ := controller.GetPodFromTemplate(&set.Spec.Template, set, newControllerRef(set)) pod.Name = getPodName(set, ordinal) - updateIdentity(set, pod) + initIdentity(set, pod) updateStorage(set, pod) return pod } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils_test.go b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils_test.go index b27145e5ad1d..817078caf78a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/statefulset/stateful_set_utils_test.go @@ -79,16 +79,6 @@ func TestIdentityMatches(t *testing.T) { if identityMatches(set, pod) { t.Error("identity matches for a Pod with the wrong namespace") } - pod = newStatefulSetPod(set, 1) - pod.Spec.Hostname = "" - if identityMatches(set, pod) { - t.Error("identity matches for a Pod with no hostname") - } - pod = newStatefulSetPod(set, 1) - pod.Spec.Subdomain = "" - if identityMatches(set, pod) { - t.Error("identity matches for a Pod with no subdomain") - } } func TestStorageMatches(t *testing.T) { @@ -138,24 +128,6 @@ func TestUpdateIdentity(t *testing.T) { if !identityMatches(set, pod) { t.Error("updateIdentity failed to update the Pods namespace") } - pod = newStatefulSetPod(set, 1) - 
pod.Spec.Hostname = "" - if identityMatches(set, pod) { - t.Error("identity matches for a Pod with no hostname") - } - updateIdentity(set, pod) - if !identityMatches(set, pod) { - t.Error("updateIdentity failed to update the Pod's hostname") - } - pod = newStatefulSetPod(set, 1) - pod.Spec.Subdomain = "" - if identityMatches(set, pod) { - t.Error("identity matches for a Pod with no subdomain") - } - updateIdentity(set, pod) - if !identityMatches(set, pod) { - t.Error("updateIdentity failed to update the Pod's subdomain") - } } func TestUpdateStorage(t *testing.T) { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 3aaf71552520..6d79598780cb 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -125,10 +125,6 @@ type ActualStateOfWorld interface { // GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor - - // Removes the given node from the record of attach updates. The node's entire - // volumesToReportAsAttached list is removed. - RemoveNodeFromAttachUpdates(nodeName types.NodeName) error } // AttachedVolume represents a volume that is attached to a node. 
@@ -264,19 +260,6 @@ func (asw *actualStateOfWorld) AddVolumeToReportAsAttached( asw.addVolumeToReportAsAttached(volumeName, nodeName) } -func (asw *actualStateOfWorld) RemoveNodeFromAttachUpdates(nodeName types.NodeName) error { - asw.Lock() - defer asw.Unlock() - - _, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] - if nodeToUpdateExists { - delete(asw.nodesToUpdateStatusFor, nodeName) - return nil - } - return fmt.Errorf("node %q does not exist in volumesToReportAsAttached list", - nodeName) -} - func (asw *actualStateOfWorld) AddVolumeNode( uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) { asw.Lock() diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go index 81e5653fd024..9254a2713d49 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go @@ -1172,89 +1172,6 @@ func Test_updateNodeStatusUpdateNeededError(t *testing.T) { } } -// Test_RemoveNodeFromAttachUpdates_Positive expects an entire node entry to be removed -// from nodesToUpdateStatusFor -func Test_RemoveNodeFromAttachUpdates_Positive(t *testing.T) { - // Arrange - volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) - asw := &actualStateOfWorld{ - attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume), - nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor), - volumePluginMgr: volumePluginMgr, - } - nodeName := types.NodeName("node-1") - nodeToUpdate := nodeToUpdateStatusFor{ - nodeName: nodeName, - statusUpdateNeeded: true, - volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName), - } - asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate - - // Act - err 
:= asw.RemoveNodeFromAttachUpdates(nodeName) - - // Assert - if err != nil { - t.Fatalf("RemoveNodeFromAttachUpdates should not return error, but got: %v", err) - } - if len(asw.nodesToUpdateStatusFor) > 0 { - t.Fatal("nodesToUpdateStatusFor should be empty as its only entry has been deleted.") - } -} - -// Test_RemoveNodeFromAttachUpdates_Negative_NodeDoesNotExist expects an error to be thrown -// when nodeName is not in nodesToUpdateStatusFor. -func Test_RemoveNodeFromAttachUpdates_Negative_NodeDoesNotExist(t *testing.T) { - // Arrange - volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) - asw := &actualStateOfWorld{ - attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume), - nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor), - volumePluginMgr: volumePluginMgr, - } - nodeName := types.NodeName("node-1") - nodeToUpdate := nodeToUpdateStatusFor{ - nodeName: nodeName, - statusUpdateNeeded: true, - volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName), - } - asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate - - // Act - err := asw.RemoveNodeFromAttachUpdates("node-2") - - // Assert - if err == nil { - t.Fatal("RemoveNodeFromAttachUpdates should return an error as the nodeName doesn't exist.") - } - if len(asw.nodesToUpdateStatusFor) != 1 { - t.Fatal("The length of nodesToUpdateStatusFor should not change because no operation was performed.") - } -} - -// Test_RemoveNodeFromAttachUpdates_Negative_Empty expects an error to be thrown -// when a nodesToUpdateStatusFor is empty. 
-func Test_RemoveNodeFromAttachUpdates_Negative_Empty(t *testing.T) { - // Arrange - volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) - asw := &actualStateOfWorld{ - attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume), - nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor), - volumePluginMgr: volumePluginMgr, - } - - // Act - err := asw.RemoveNodeFromAttachUpdates("node-1") - - // Assert - if err == nil { - t.Fatal("RemoveNodeFromAttachUpdates should return an error as nodeToUpdateStatusFor is empty.") - } - if len(asw.nodesToUpdateStatusFor) != 0 { - t.Fatal("The length of nodesToUpdateStatusFor should be 0.") - } -} - func verifyAttachedVolume( t *testing.T, attachedVolumes []AttachedVolume, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go index e66253cfd0ff..66169be4f908 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go @@ -148,6 +148,11 @@ func (rc *reconciler) isMultiAttachForbidden(volumeSpec *volume.Spec) bool { // Only if this volume is a persistent volume, we have reliable information on wether it's allowed or not to // multi-attach. We trust in the individual volume implementations to not allow unsupported access modes if volumeSpec.PersistentVolume != nil { + // Check for persistent volume types which do not fail when trying to multi-attach + if volumeSpec.PersistentVolume.Spec.VsphereVolume != nil { + return false + } + if len(volumeSpec.PersistentVolume.Spec.AccessModes) == 0 { // No access mode specified so we don't know for sure. 
Let the attacher fail if needed return false diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index 94db1f2b6b14..818f3c6d9c1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -68,13 +68,11 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { nodeObj, err := nsu.nodeLister.Get(string(nodeName)) if errors.IsNotFound(err) { // If node does not exist, its status cannot be updated. - // Remove the node entry from the collection of attach updates, preventing the - // status updater from unnecessarily updating the node. + // Do nothing so that there is no retry until node is created. glog.V(2).Infof( "Could not update node status. Failed to find node %q in NodeInformer cache. 
Error: '%v'", nodeName, err) - nsu.actualStateOfWorld.RemoveNodeFromAttachUpdates(nodeName) continue } else if err != nil { // For all other errors, log error and reset flag statusUpdateNeeded diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD index 7dcf18a296cc..0d6da56ec02c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD @@ -36,7 +36,6 @@ go_library( "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go index 39d25bca0c92..97ef17f6b7a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go @@ -39,7 +39,6 @@ import ( "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" vol "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "github.com/golang/glog" ) @@ -1216,10 +1215,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu return false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err) } - opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete") - err = deleter.Delete() - opComplete(err) - if err != nil { + if err = deleter.Delete(); err != nil { // Deleter failed return false, err } @@ -1329,9 +1325,7 @@ func (ctrl 
*PersistentVolumeController) provisionClaimOperation(claimObj interfa return } - opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision") volume, err = provisioner.Provision() - opComplete(err) if err != nil { strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD index 41b2154ad536..aacd43a55aa9 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/BUILD @@ -16,10 +16,10 @@ go_library( "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/credentialprovider:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go index 771c41780b53..257cbee5b81e 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials.go @@ -17,12 +17,12 @@ limitations under the License. 
package azure import ( - "io/ioutil" + "io" + "os" "time" - yaml "gopkg.in/yaml.v2" - "github.com/Azure/azure-sdk-for-go/arm/containerregistry" + "github.com/Azure/go-autorest/autorest" azureapi "github.com/Azure/go-autorest/autorest/azure" "github.com/golang/glog" "github.com/spf13/pflag" @@ -45,10 +45,12 @@ func init() { }) } +// RegistriesClient is a testable interface for the ACR client List operation. type RegistriesClient interface { List() (containerregistry.RegistryListResult, error) } +// NewACRProvider parses the specified configFile and returns a DockerConfigProvider func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider { return &acrProvider{ file: configFile, @@ -57,24 +59,16 @@ func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider type acrProvider struct { file *string - config azure.Config - environment azureapi.Environment + config *azure.Config + environment *azureapi.Environment registryClient RegistriesClient } -func (a *acrProvider) loadConfig(contents []byte) error { - err := yaml.Unmarshal(contents, &a.config) +func (a *acrProvider) loadConfig(rdr io.Reader) error { + var err error + a.config, a.environment, err = azure.ParseConfig(rdr) if err != nil { - return err - } - - if a.config.Cloud == "" { - a.environment = azureapi.PublicCloud - } else { - a.environment, err = azureapi.EnvironmentFromName(a.config.Cloud) - if err != nil { - return err - } + glog.Errorf("Failed to load azure credential file: %v", err) } return nil } @@ -84,27 +78,21 @@ func (a *acrProvider) Enabled() bool { glog.V(5).Infof("Azure config unspecified, disabling") return false } - contents, err := ioutil.ReadFile(*a.file) + + f, err := os.Open(*a.file) if err != nil { - glog.Errorf("Failed to load azure credential file: %v", err) - return false - } - if err := a.loadConfig(contents); err != nil { - glog.Errorf("Failed to parse azure credential file: %v", err) + glog.Errorf("Failed to load config from file: %s", *a.file) 
return false } + defer f.Close() - oauthConfig, err := a.environment.OAuthConfigForTenant(a.config.TenantID) + err = a.loadConfig(f) if err != nil { - glog.Errorf("Failed to get oauth config: %v", err) + glog.Errorf("Failed to load config from file: %s", *a.file) return false } - servicePrincipalToken, err := azureapi.NewServicePrincipalToken( - *oauthConfig, - a.config.AADClientID, - a.config.AADClientSecret, - a.environment.ServiceManagementEndpoint) + servicePrincipalToken, err := azure.GetServicePrincipalToken(a.config, a.environment) if err != nil { glog.Errorf("Failed to create service principal token: %v", err) return false @@ -112,7 +100,7 @@ func (a *acrProvider) Enabled() bool { registryClient := containerregistry.NewRegistriesClient(a.config.SubscriptionID) registryClient.BaseURI = a.environment.ResourceManagerEndpoint - registryClient.Authorizer = servicePrincipalToken + registryClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) a.registryClient = registryClient return true diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials_test.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials_test.go index 8f6973877682..9d966fe6be54 100644 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials_test.go +++ b/vendor/k8s.io/kubernetes/pkg/credentialprovider/azure/azure_credentials_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package azure import ( + "bytes" "testing" "github.com/Azure/azure-sdk-for-go/arm/containerregistry" @@ -66,7 +67,7 @@ func Test(t *testing.T) { provider := &acrProvider{ registryClient: fakeClient, } - provider.loadConfig([]byte(configStr)) + provider.loadConfig(bytes.NewBufferString(configStr)) creds := provider.Provide() diff --git a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go index bface04d6a63..9b412f6bd9f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go +++ b/vendor/k8s.io/kubernetes/pkg/generated/openapi/zz_generated.openapi.go @@ -2277,6 +2277,93 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope Dependencies: []string{ "k8s.io/apiserver/pkg/apis/audit/v1alpha1.GroupResources"}, }, + "k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestType": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestTypeStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestTypeStatus"}, + }, + "k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestTypeList": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestType"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestType"}, + }, + "k8s.io/code-generator/cmd/client-gen/test_apis/testgroup/v1.TestTypeStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Properties: map[string]spec.Schema{ + "Blah": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"Blah"}, + }, + }, + Dependencies: []string{}, + }, "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1.APIService": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -2521,93 +2608,6 @@ func GetOpenAPIDefinitions(ref openapi.ReferenceCallback) map[string]openapi.Ope }, Dependencies: []string{}, }, - "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestType": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestTypeStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestTypeStatus"}, - }, - "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestTypeList": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestType"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestType"}, - }, - "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1.TestTypeStatus": { - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Properties: map[string]spec.Schema{ - "Blah": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"Blah"}, - }, - }, - Dependencies: []string{}, - }, "k8s.io/kubernetes/federation/apis/federation/v1beta1.Cluster": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD index 218ed11ea540..4cf9f02270a5 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/BUILD @@ -12,14 +12,17 @@ go_test( name = "go_default_test", srcs = [ "configuration_manager_test.go", + "external_admission_hook_manager_test.go", "initializer_manager_test.go", ], library = ":go_default_library", tags = ["automanaged"], deps = [ "//pkg/apis/admissionregistration/v1alpha1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go index 64d4bd375893..5ca2d6206162 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager.go @@ -20,6 +20,9 @@ import ( "fmt" "reflect" + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" @@ -37,6 +40,10 @@ func NewExternalAdmissionHookConfigurationManager(c ExternalAdmissionHookConfigu getFn := func() (runtime.Object, error) { list, err := c.List(metav1.ListOptions{}) if err != nil { + if errors.IsNotFound(err) || errors.IsForbidden(err) { + glog.V(5).Infof("ExternalAdmissionHookConfiguration are disabled due to an error: %v", err) + return nil, ErrDisabled + } return nil, err } return mergeExternalAdmissionHookConfigurations(list), nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager_test.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager_test.go new file mode 100644 index 000000000000..14a3debfb425 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/external_admission_hook_manager_test.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configuration + +import ( + "testing" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" +) + +type disabledWebhookConfigLister struct{} + +func (l *disabledWebhookConfigLister) List(options metav1.ListOptions) (*v1alpha1.ExternalAdmissionHookConfigurationList, error) { + return nil, errors.NewNotFound(schema.GroupResource{Group: "admissionregistration", Resource: "externalAdmissionHookConfigurations"}, "") +} +func TestWebhookConfigDisabled(t *testing.T) { + manager := NewExternalAdmissionHookConfigurationManager(&disabledWebhookConfigLister{}) + manager.sync() + _, err := manager.ExternalAdmissionHooks() + if err.Error() != ErrDisabled.Error() { + t.Errorf("expected %v, got %v", ErrDisabled, err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager_test.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager_test.go index b9d1e7991381..63d4337034eb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/admission/configuration/initializer_manager_test.go @@ -22,7 +22,9 @@ import ( "testing" "time" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" ) @@ -164,3 +166,17 @@ func TestMergeInitializerConfigurations(t *testing.T) { t.Errorf("expected: %#v, got: %#v", expected, got) } } + +type disabledInitializerConfigLister struct{} + +func (l *disabledInitializerConfigLister) List(options metav1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) { + return nil, errors.NewNotFound(schema.GroupResource{Group: "admissionregistration", Resource: "initializerConfigurations"}, "") +} +func TestInitializerConfigDisabled(t *testing.T) { + manager := NewInitializerConfigurationManager(&disabledInitializerConfigLister{}) + manager.sync() + _, err := manager.Initializers() + if err.Error() != ErrDisabled.Error() { + t.Errorf("expected %v, got %v", ErrDisabled, err) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go index 24127f546a89..8186a0ac7abc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubeapiserver/server/insecure_handler.go @@ -46,7 +46,7 @@ func BuildInsecureHandlerChain(apiHandler http.Handler, c *server.Config) http.H handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, insecureSuperuser{}, nil) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") handler = genericfilters.WithPanicRecovery(handler) - handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc) + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.RequestContextMapper, c.LongRunningFunc) handler = genericapifilters.WithRequestInfo(handler, 
server.NewRequestInfoResolver(c), c.RequestContextMapper) handler = apirequest.WithRequestContext(handler, c.RequestContextMapper) diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go index 3cbb85970773..a7f3a60e05ba 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/set/set_selector.go @@ -116,6 +116,10 @@ func (o *SelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ mapper, _ := f.Object() o.mapper = mapper o.encoder = f.JSONEncoder() + o.resources, o.selector, err = getResourcesAndSelector(args) + if err != nil { + return err + } o.builder = f.NewBuilder(!o.local). ContinueOnError(). @@ -135,8 +139,6 @@ func (o *SelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { return f.ClientForMapping(mapping) } - - o.resources, o.selector, err = getResourcesAndSelector(args) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/record_testcase.sh b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/record_testcase.sh index 9a51c351c668..6c790749c580 100755 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/record_testcase.sh +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/record_testcase.sh @@ -64,7 +64,7 @@ kind: Config users: [] " > "${edit_kubeconfig}" export KUBECONFIG="${edit_kubeconfig}" - + echo "Starting subshell. Type exit when finished." 
bash popd diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-not-update-annotation/0.response b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-not-update-annotation/0.response index 9d4d9c7750ab..b4693de49418 100755 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-not-update-annotation/0.response +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-not-update-annotation/0.response @@ -35,4 +35,3 @@ "loadBalancer": {} } } - diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-update-annotation/0.response b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-update-annotation/0.response index 9d4d9c7750ab..b4693de49418 100755 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-update-annotation/0.response +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/testdata/edit/testcase-update-annotation/0.response @@ -35,4 +35,3 @@ "loadBalancer": {} } } - diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/aging.rb b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/aging.rb index ed9a87fffc6f..da8c372ecb7f 100755 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/aging.rb +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/aging.rb @@ -48,7 +48,7 @@ def duration if age < min_age min_age = age end -} +} data = data.sort_by{ |name, age| age } @@ -57,7 +57,7 @@ def duration data.each { |name, age| output = "" output += name.rjust(max_name_length, ' ') + ": " - bar_size = (age*80/max_age).ceil + bar_size = (age*80/max_age).ceil bar_size.times{ output += "▒" } output += " " + age.duration puts output @@ -66,4 +66,3 @@ def duration else puts "No pods" end - diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/plugin.yaml b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/plugin.yaml index 07dc2022a685..ee44e8bcea33 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/plugin.yaml +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/examples/aging/plugin.yaml @@ -2,7 +2,7 @@ name: "aging" shortDesc: "Aging shows pods by age" longDesc: > Aging shows pods from the current namespace by age. - Once we have plugin support for global flags through + Once we have plugin support for global flags through env vars (planned for V1) we'll be able to switch between namespaces using the --namespace flag. command: ./aging.rb diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go index 34b5dba91573..a783a8155d76 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go +++ b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go @@ -38,21 +38,18 @@ func NewResourceFilter() Filters { } // filterPods returns true if a pod should be skipped. -// defaults to true for terminated pods +// If show-all is true, the pod will be never be skipped (return false); +// otherwise, skip terminated pod. 
func filterPods(obj runtime.Object, options printers.PrintOptions) bool { + if options.ShowAll { + return false + } + switch p := obj.(type) { case *v1.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(v1.PodSucceeded) || reason == string(v1.PodFailed)) + return p.Status.Phase == v1.PodSucceeded || p.Status.Phase == v1.PodFailed case *api.Pod: - reason := string(p.Status.Phase) - if p.Status.Reason != "" { - reason = p.Status.Reason - } - return !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) + return p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed } return false } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD index ce6fdc59d8d1..3fa667a97721 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/BUILD @@ -41,6 +41,7 @@ go_library( "//pkg/util/sysctl:go_default_library", "//pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go index 7ab9b59d9c07..175f7c02c036 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go @@ -30,6 +30,7 @@ import ( "time" "github.com/golang/glog" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" "github.com/opencontainers/runc/libcontainer/cgroups" 
"github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" @@ -219,30 +220,12 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I var capacity = v1.ResourceList{} // It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because // machine info is computed and cached once as part of cAdvisor object creation. + // But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts if info, err := cadvisorInterface.MachineInfo(); err == nil { capacity = cadvisor.CapacityFromMachineInfo(info) } else { return nil, err } - rootfs, err := cadvisorInterface.RootFsInfo() - if err != nil { - capacity[v1.ResourceStorageScratch] = resource.MustParse("0Gi") - } else { - for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) { - capacity[rName] = rCap - } - } - - if hasDedicatedImageFs, _ := cadvisorInterface.HasDedicatedImageFs(); hasDedicatedImageFs { - imagesfs, err := cadvisorInterface.ImagesFsInfo() - if err != nil { - glog.Errorf("Failed to get Image filesystem information: %v", err) - } else { - for rName, rCap := range cadvisor.StorageOverlayCapacityFromFsInfo(imagesfs) { - capacity[rName] = rCap - } - } - } cgroupRoot := nodeConfig.CgroupRoot cgroupManager := NewCgroupManager(subsystems, nodeConfig.CgroupDriver) @@ -551,6 +534,44 @@ func (cm *containerManagerImpl) Start(node *v1.Node, activePods ActivePodsFunc) }, 5*time.Minute, wait.NeverStop) } + // Local storage filesystem information from `RootFsInfo` and `ImagesFsInfo` is available at a later time + // depending on the time when cadvisor manager updates container stats. Therefore use a go routine to keep + // retrieving the information until it is available. 
+ stopChan := make(chan struct{}) + go wait.Until(func() { + if err := cm.setFsCapacity(); err != nil { + glog.Errorf("[ContainerManager]: %v", err) + return + } + close(stopChan) + }, time.Second, stopChan) + return nil +} + +func (cm *containerManagerImpl) setFsCapacity() error { + rootfs, err := cm.cadvisorInterface.RootFsInfo() + if err != nil { + return fmt.Errorf("Fail to get rootfs information %v", err) + } + hasDedicatedImageFs, _ := cm.cadvisorInterface.HasDedicatedImageFs() + var imagesfs cadvisorapiv2.FsInfo + if hasDedicatedImageFs { + imagesfs, err = cm.cadvisorInterface.ImagesFsInfo() + if err != nil { + return fmt.Errorf("Fail to get imagefs information %v", err) + } + } + + cm.Lock() + for rName, rCap := range cadvisor.StorageScratchCapacityFromFsInfo(rootfs) { + cm.capacity[rName] = rCap + } + if hasDedicatedImageFs { + for rName, rCap := range cadvisor.StorageOverlayCapacityFromFsInfo(imagesfs) { + cm.capacity[rName] = rCap + } + } + cm.Unlock() return nil } @@ -809,6 +830,8 @@ func getDockerAPIVersion(cadvisor cadvisor.Interface) *utilversion.Version { return dockerAPIVersion } -func (m *containerManagerImpl) GetCapacity() v1.ResourceList { - return m.capacity +func (cm *containerManagerImpl) GetCapacity() v1.ResourceList { + cm.RLock() + defer cm.RUnlock() + return cm.capacity } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go index 12219d0b9b9c..d1cba37ae472 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux_test.go @@ -47,6 +47,14 @@ func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) { return mi.mountPoints, nil } +func (mi *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mi *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) { + return 
false, fmt.Errorf("unsupported") +} + func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) { return false, fmt.Errorf("unsupported") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go index 11d70260e9d9..d1451946fc0e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_unsupported_test.go @@ -40,6 +40,14 @@ func (mi *fakeMountInterface) List() ([]mount.MountPoint, error) { return mi.mountPoints, nil } +func (f *fakeMountInterface) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (f *fakeMountInterface) IsNotMountPoint(dir string) (bool, error) { + return false, fmt.Errorf("unsupported") +} + func (mi *fakeMountInterface) IsLikelyNotMountPoint(file string) (bool, error) { return false, fmt.Errorf("unsupported") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go index 7fd127b2a40b..58097429e0c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/node_container_manager.go @@ -217,6 +217,10 @@ func hardEvictionReservation(thresholds []evictionapi.Threshold, capacity v1.Res memoryCapacity := capacity[v1.ResourceMemory] value := evictionapi.GetThresholdQuantity(threshold.Value, &memoryCapacity) ret[v1.ResourceMemory] = *value + case evictionapi.SignalNodeFsAvailable: + storageCapacity := capacity[v1.ResourceStorageScratch] + value := evictionapi.GetThresholdQuantity(threshold.Value, &storageCapacity) + ret[v1.ResourceStorageScratch] = *value } } return ret diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD index 864dfcac8fe3..8f646da59730 
100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD @@ -54,6 +54,7 @@ go_library( "//pkg/security/apparmor:go_default_library", "//pkg/util/exec:go_default_library", "//pkg/util/hash:go_default_library", + "//pkg/util/parsers:go_default_library", "//pkg/util/term:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go index d1215bc1b150..70c5bcb0ded1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go @@ -226,9 +226,7 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { // since it is stopped. With empty network namespcae, CNI bridge plugin will conduct best // effort clean up and will not return error. errList := []error{} - ready, ok := ds.getNetworkReady(podSandboxID) - if needNetworkTearDown && (ready || !ok) { - // Only tear down the pod network if we haven't done so already + if needNetworkTearDown { cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID) err := ds.network.TearDownPod(namespace, name, cID) if err == nil { @@ -271,15 +269,12 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error { } // Remove the sandbox container. 
- err = ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}) - if err == nil || libdocker.IsContainerNotFoundError(err) { - // Only clear network ready when the sandbox has actually been - // removed from docker or doesn't exist - ds.clearNetworkReady(podSandboxID) - } else { + if err := ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}); err != nil && !libdocker.IsContainerNotFoundError(err) { errs = append(errs, err) } + ds.clearNetworkReady(podSandboxID) + // Remove the checkpoint of the sandbox. if err := ds.checkpointHandler.RemoveCheckpoint(podSandboxID); err != nil { errs = append(errs, err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go index 1d73a8a8d5c6..f7344eb727be 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/exec.go @@ -135,6 +135,9 @@ func (*NsenterExecHandler) ExecInContainer(client libdocker.Interface, container type NativeExecHandler struct{} func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + done := make(chan struct{}) + defer close(done) + createOpts := dockertypes.ExecConfig{ Cmd: cmd, AttachStdin: stdin != nil, @@ -149,9 +152,23 @@ func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container // Have to start this before the call to client.StartExec because client.StartExec is a blocking // call :-( Otherwise, resize events don't get processed and the terminal never resizes. 
- kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) { - client.ResizeExecTTY(execObj.ID, int(size.Height), int(size.Width)) - }) + // + // We also have to delay attempting to send a terminal resize request to docker until after the + // exec has started; otherwise, the initial resize request will fail. + execStarted := make(chan struct{}) + go func() { + select { + case <-execStarted: + // client.StartExec has started the exec, so we can start resizing + case <-done: + // ExecInContainer has returned, so short-circuit + return + } + + kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) { + client.ResizeExecTTY(execObj.ID, int(size.Height), int(size.Width)) + }) + }() startOpts := dockertypes.ExecStartCheck{Detach: false, Tty: tty} streamOpts := libdocker.StreamOptions{ @@ -159,6 +176,7 @@ func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container OutputStream: stdout, ErrorStream: stderr, RawTerminal: tty, + ExecStarted: execStarted, } err = client.StartExec(execObj.ID, startOpts, streamOpts) if err != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go index c5b99a32d575..20b538a72cf8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "os" "path/filepath" "regexp" "strconv" @@ -34,13 +33,14 @@ import ( dockernat "github.com/docker/go-connections/nat" "github.com/golang/glog" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/credentialprovider" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" + "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/security/apparmor" - - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" + 
"k8s.io/kubernetes/pkg/util/parsers" ) const ( @@ -393,11 +393,6 @@ func getSecurityOptSeparator(v *semver.Version) rune { // ensureSandboxImageExists pulls the sandbox image when it's not present. func ensureSandboxImageExists(client libdocker.Interface, image string) error { - dockerCfgSearchPath := []string{"/.docker", filepath.Join(os.Getenv("HOME"), ".docker")} - return ensureSandboxImageExistsDockerCfg(client, image, dockerCfgSearchPath) -} - -func ensureSandboxImageExistsDockerCfg(client libdocker.Interface, image string, dockerCfgSearchPath []string) error { _, err := client.InspectImageByRef(image) if err == nil { return nil @@ -406,34 +401,37 @@ func ensureSandboxImageExistsDockerCfg(client libdocker.Interface, image string, return fmt.Errorf("failed to inspect sandbox image %q: %v", image, err) } - // To support images in private registries, try to read docker config - authConfig := dockertypes.AuthConfig{} - keyring := &credentialprovider.BasicDockerKeyring{} - var cfgLoadErr error - if cfg, err := credentialprovider.ReadDockerConfigJSONFile(dockerCfgSearchPath); err == nil { - keyring.Add(cfg) - } else if cfg, err := credentialprovider.ReadDockercfgFile(dockerCfgSearchPath); err == nil { - keyring.Add(cfg) - } else { - cfgLoadErr = err - } - if creds, withCredentials := keyring.Lookup(image); withCredentials { - // Use the first one that matched our image - for _, cred := range creds { - authConfig.Username = cred.Username - authConfig.Password = cred.Password - break + repoToPull, _, _, err := parsers.ParseImageName(image) + if err != nil { + return err + } + + keyring := credentialprovider.NewDockerKeyring() + creds, withCredentials := keyring.Lookup(repoToPull) + if !withCredentials { + glog.V(3).Infof("Pulling image %q without credentials", image) + + err := client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{}) + if err != nil { + return fmt.Errorf("failed pulling image %q: %v", image, err) } + + return nil } - err = 
client.PullImage(image, authConfig, dockertypes.ImagePullOptions{}) - if err != nil { - if cfgLoadErr != nil { - glog.Warningf("Couldn't load Docker cofig. If sandbox image %q is in a private registry, this will cause further errors. Error: %v", image, cfgLoadErr) + var pullErrs []error + for _, currentCreds := range creds { + authConfig := credentialprovider.LazyProvide(currentCreds) + err := client.PullImage(image, authConfig, dockertypes.ImagePullOptions{}) + // If there was no error, return success + if err == nil { + return nil } - return fmt.Errorf("unable to pull sandbox image %q: %v", image, err) + + pullErrs = append(pullErrs, err) } - return nil + + return utilerrors.NewAggregate(pullErrs) } func getAppArmorOpts(profile string) ([]dockerOpt, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go index 7304abfd9667..109726cb3467 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package dockershim import ( - "encoding/base64" "fmt" "io/ioutil" "os" @@ -34,9 +33,8 @@ import ( "k8s.io/kubernetes/pkg/api/v1" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" - "k8s.io/kubernetes/pkg/security/apparmor" - "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" + "k8s.io/kubernetes/pkg/security/apparmor" ) func TestLabelsAndAnnotationsRoundTrip(t *testing.T) { @@ -267,10 +265,7 @@ func writeDockerConfig(cfg string) (string, error) { func TestEnsureSandboxImageExists(t *testing.T) { sandboxImage := "gcr.io/test/image" - registryHost := "https://gcr.io/" authConfig := dockertypes.AuthConfig{Username: "user", Password: "pass"} - authB64 := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", authConfig.Username, authConfig.Password))) - authJSON := fmt.Sprintf("{\"auths\": {\"%s\": {\"auth\": \"%s\"} } }", registryHost, authB64) for desc, test := range map[string]struct { injectImage bool imgNeedsAuth bool @@ -302,14 +297,6 @@ func TestEnsureSandboxImageExists(t *testing.T) { calls: []string{"inspect_image", "pull"}, err: true, }, - "should pull private image using dockerauth if image doesn't exist": { - injectImage: true, - imgNeedsAuth: true, - injectErr: libdocker.ImageNotFoundError{ID: "image_id"}, - calls: []string{"inspect_image", "pull"}, - configJSON: authJSON, - err: false, - }, } { t.Logf("TestCase: %q", desc) _, fakeDocker, _ := newTestDockerService() @@ -322,15 +309,7 @@ func TestEnsureSandboxImageExists(t *testing.T) { } fakeDocker.InjectError("inspect_image", test.injectErr) - var dockerCfgSearchPath []string - if test.configJSON != "" { - tmpdir, err := writeDockerConfig(test.configJSON) - require.NoError(t, err, "could not create a temp docker config file") - dockerCfgSearchPath = append(dockerCfgSearchPath, filepath.Join(tmpdir, ".docker")) - defer os.RemoveAll(tmpdir) - } - - err := ensureSandboxImageExistsDockerCfg(fakeDocker, sandboxImage, dockerCfgSearchPath) + err := ensureSandboxImageExists(fakeDocker, 
sandboxImage) assert.NoError(t, fakeDocker.AssertCalls(test.calls)) assert.Equal(t, test.err, err != nil) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index f3bbc3f0536e..7fdfb03e1ca0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -463,6 +463,15 @@ func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStar return err } defer resp.Close() + + if sopts.ExecStarted != nil { + // Send a message to the channel indicating that the exec has started. This is needed so + // interactive execs can handle resizing correctly - the request to resize the TTY has to happen + // after the call to d.client.ContainerExecAttach, and because d.holdHijackedConnection below + // blocks, we use sopts.ExecStarted to signal the caller that it's ok to resize. + sopts.ExecStarted <- struct{}{} + } + return d.holdHijackedConnection(sopts.RawTerminal || opts.Tty, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp) } @@ -593,6 +602,7 @@ type StreamOptions struct { InputStream io.Reader OutputStream io.Writer ErrorStream io.Writer + ExecStarted chan struct{} } // operationTimeout is the error returned when the docker operations are timeout. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go index 458cb6e7b1b4..bc7776328a75 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager.go @@ -148,11 +148,11 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd } // Start starts the control loop to observe and response to low compute resources. 
-func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, nodeProvider NodeProvider, monitoringInterval time.Duration) { +func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, capacityProvider CapacityProvider, monitoringInterval time.Duration) { // start the eviction manager monitoring go func() { for { - if evictedPods := m.synchronize(diskInfoProvider, podFunc, nodeProvider); evictedPods != nil { + if evictedPods := m.synchronize(diskInfoProvider, podFunc, capacityProvider); evictedPods != nil { glog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods)) m.waitForPodsCleanup(podCleanedUpFunc, evictedPods) } else { @@ -211,7 +211,7 @@ func startMemoryThresholdNotifier(thresholds []evictionapi.Threshold, observatio // synchronize is the main control loop that enforces eviction thresholds. // Returns the pod that was killed, or nil if no pod was killed. -func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, nodeProvider NodeProvider) []*v1.Pod { +func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, capacityProvider CapacityProvider) []*v1.Pod { // if we have nothing to do, just return thresholds := m.config.Thresholds if len(thresholds) == 0 { @@ -233,7 +233,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act activePods := podFunc() // make observations and get a function to derive pod usage stats relative to those observations. 
- observations, statsFunc, err := makeSignalObservations(m.summaryProvider, nodeProvider, activePods, *m.dedicatedImageFs) + observations, statsFunc, err := makeSignalObservations(m.summaryProvider, capacityProvider, activePods, *m.dedicatedImageFs) if err != nil { glog.Errorf("eviction manager: unexpected err: %v", err) return nil @@ -248,7 +248,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act err = startMemoryThresholdNotifier(m.config.Thresholds, observations, false, func(desc string) { glog.Infof("soft memory eviction threshold crossed at %s", desc) // TODO wait grace period for soft memory limit - m.synchronize(diskInfoProvider, podFunc, nodeProvider) + m.synchronize(diskInfoProvider, podFunc, capacityProvider) }) if err != nil { glog.Warningf("eviction manager: failed to create hard memory threshold notifier: %v", err) @@ -256,7 +256,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // start hard memory notification err = startMemoryThresholdNotifier(m.config.Thresholds, observations, true, func(desc string) { glog.Infof("hard memory eviction threshold crossed at %s", desc) - m.synchronize(diskInfoProvider, podFunc, nodeProvider) + m.synchronize(diskInfoProvider, podFunc, capacityProvider) }) if err != nil { glog.Warningf("eviction manager: failed to create soft memory threshold notifier: %v", err) @@ -491,7 +491,7 @@ func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1. 
if source.EmptyDir != nil { size := source.EmptyDir.SizeLimit used := podVolumeUsed[pod.Spec.Volumes[i].Name] - if used != nil && size.Sign() == 1 && used.Cmp(size) > 0 { + if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 { // the emptyDir usage exceeds the size limit, evict the pod return m.evictPod(pod, v1.ResourceName("EmptyDir"), fmt.Sprintf("emptyDir usage exceeds the limit %q", size.String())) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager_test.go index 2177be4b35af..644444309a5e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/eviction_manager_test.go @@ -59,22 +59,25 @@ func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) { return m.dedicatedImageFs, nil } -func newMockNodeProvider(allocatableCapacity v1.ResourceList) *mockNodeProvider { - return &mockNodeProvider{ - node: v1.Node{ - Status: v1.NodeStatus{ - Allocatable: allocatableCapacity, - }, - }, +func newMockCapacityProvider(capacity, reservation v1.ResourceList) *mockCapacityProvider { + return &mockCapacityProvider{ + capacity: capacity, + reservation: reservation, } } -type mockNodeProvider struct { - node v1.Node +type mockCapacityProvider struct { + capacity v1.ResourceList + reservation v1.ResourceList +} + +func (m *mockCapacityProvider) GetCapacity() v1.ResourceList { + return m.capacity + } -func (m *mockNodeProvider) GetNode() (*v1.Node, error) { - return &m.node, nil +func (m *mockCapacityProvider) GetNodeAllocatableReservation() v1.ResourceList { + return m.reservation } // mockDiskGC is used to simulate invoking image and container garbage collection. 
@@ -200,7 +203,7 @@ func TestMemoryPressure(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) imageGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -243,7 +246,7 @@ func TestMemoryPressure(t *testing.T) { burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi") // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure if manager.IsUnderMemoryPressure() { @@ -261,7 +264,7 @@ func TestMemoryPressure(t *testing.T) { // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -276,7 +279,7 @@ func TestMemoryPressure(t *testing.T) { // step forward in time pass the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -301,7 +304,7 @@ func TestMemoryPressure(t *testing.T) { // remove memory pressure fakeClock.Step(20 * time.Minute) 
summaryProvider.result = summaryStatsMaker("3Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure if manager.IsUnderMemoryPressure() { @@ -311,7 +314,7 @@ func TestMemoryPressure(t *testing.T) { // induce memory pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -339,7 +342,7 @@ func TestMemoryPressure(t *testing.T) { fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { @@ -363,7 +366,7 @@ func TestMemoryPressure(t *testing.T) { fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure (because transition period met) if manager.IsUnderMemoryPressure() { @@ -418,7 +421,7 @@ func TestDiskPressureNodeFs(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, 
v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -461,7 +464,7 @@ func TestDiskPressureNodeFs(t *testing.T) { podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), newResourceList("", ""), "0Gi", "0Gi", "0Gi") // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -476,7 +479,7 @@ func TestDiskPressureNodeFs(t *testing.T) { // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -491,7 +494,7 @@ func TestDiskPressureNodeFs(t *testing.T) { // step forward in time pass the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Gi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -516,7 +519,7 @@ func TestDiskPressureNodeFs(t *testing.T) { // remove disk pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -526,7 +529,7 @@ func TestDiskPressureNodeFs(t *testing.T) { // induce disk pressure! 
fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -551,7 +554,7 @@ func TestDiskPressureNodeFs(t *testing.T) { fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure (because transition period not yet met) if !manager.IsUnderDiskPressure() { @@ -572,7 +575,7 @@ func TestDiskPressureNodeFs(t *testing.T) { fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure (because transition period met) if manager.IsUnderDiskPressure() { @@ -617,7 +620,7 @@ func TestMinReclaim(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -652,7 +655,7 @@ func TestMinReclaim(t *testing.T) { } // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + 
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure if manager.IsUnderMemoryPressure() { @@ -662,7 +665,7 @@ func TestMinReclaim(t *testing.T) { // induce memory pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -682,7 +685,7 @@ func TestMinReclaim(t *testing.T) { fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1.2Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { @@ -702,7 +705,7 @@ func TestMinReclaim(t *testing.T) { fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { @@ -718,7 +721,7 @@ func TestMinReclaim(t *testing.T) { fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("2Gi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure (because transition period met) if manager.IsUnderMemoryPressure() { @@ -757,7 +760,7 @@ func TestNodeReclaimFuncs(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} 
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) imageGcFree := resource.MustParse("700Mi") diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -793,7 +796,7 @@ func TestNodeReclaimFuncs(t *testing.T) { } // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -803,7 +806,7 @@ func TestNodeReclaimFuncs(t *testing.T) { // induce hard threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker(".9Gi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -827,7 +830,7 @@ func TestNodeReclaimFuncs(t *testing.T) { // remove disk pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -837,7 +840,7 @@ func TestNodeReclaimFuncs(t *testing.T) { // induce disk pressure! 
fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -864,7 +867,7 @@ func TestNodeReclaimFuncs(t *testing.T) { diskGC.imageGCInvoked = false // reset state diskGC.containerGCInvoked = false // reset state podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure (because transition period not yet met) if !manager.IsUnderDiskPressure() { @@ -887,7 +890,7 @@ func TestNodeReclaimFuncs(t *testing.T) { diskGC.imageGCInvoked = false // reset state diskGC.containerGCInvoked = false // reset state podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure (because transition period met) if manager.IsUnderDiskPressure() { @@ -955,7 +958,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -998,7 +1001,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { podToAdmit, _ := podMaker("pod-to-admit", newResourceList("", ""), 
newResourceList("", ""), "0", "0", "0") // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -1013,7 +1016,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { // induce soft threshold fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -1028,7 +1031,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { // step forward in time pass the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -1053,7 +1056,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { // remove inode pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure if manager.IsUnderDiskPressure() { @@ -1063,7 +1066,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { // induce inode pressure! 
fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure if !manager.IsUnderDiskPressure() { @@ -1088,7 +1091,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have disk pressure (because transition period not yet met) if !manager.IsUnderDiskPressure() { @@ -1109,7 +1112,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) { fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have disk pressure (because transition period met) if manager.IsUnderDiskPressure() { @@ -1157,7 +1160,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{ Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "", @@ -1203,7 +1206,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { // induce soft threshold fakeClock.Step(1 * 
time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -1218,7 +1221,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { // step forward in time pass the grace period fakeClock.Step(3 * time.Minute) summaryProvider.result = summaryStatsMaker("1500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -1236,7 +1239,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { // remove memory pressure fakeClock.Step(20 * time.Minute) summaryProvider.result = summaryStatsMaker("3Gi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure if manager.IsUnderMemoryPressure() { @@ -1249,7 +1252,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { // induce memory pressure! 
fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("500Mi", podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -1290,7 +1293,7 @@ func TestAllocatableMemoryPressure(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false} - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("3Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("1Gi")}) diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil} nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""} @@ -1326,7 +1329,7 @@ func TestAllocatableMemoryPressure(t *testing.T) { burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi") // synchronize - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure if manager.IsUnderMemoryPressure() { @@ -1346,7 +1349,7 @@ func TestAllocatableMemoryPressure(t *testing.T) { pod, podStat := podMaker("guaranteed-high-2", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi"), "1Gi") podStats[pod] = podStat summaryProvider.result = summaryStatsMaker(constantCapacity, podStats) - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure if !manager.IsUnderMemoryPressure() { @@ -1382,7 +1385,7 @@ func TestAllocatableMemoryPressure(t *testing.T) { } summaryProvider.result = 
summaryStatsMaker(constantCapacity, podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should have memory pressure (because transition period not yet met) if !manager.IsUnderMemoryPressure() { @@ -1406,7 +1409,7 @@ func TestAllocatableMemoryPressure(t *testing.T) { fakeClock.Step(5 * time.Minute) summaryProvider.result = summaryStatsMaker(constantCapacity, podStats) podKiller.pod = nil // reset state - manager.synchronize(diskInfoProvider, activePodsFunc, nodeProvider) + manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider) // we should not have memory pressure (because transition period met) if manager.IsUnderMemoryPressure() { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go index 4c9532c6146d..835054ed3ee9 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers.go @@ -658,16 +658,11 @@ func (a byEvictionPriority) Less(i, j int) bool { } // makeSignalObservations derives observations using the specified summary provider. 
-func makeSignalObservations(summaryProvider stats.SummaryProvider, nodeProvider NodeProvider, pods []*v1.Pod, withImageFs bool) (signalObservations, statsFunc, error) { +func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod, withImageFs bool) (signalObservations, statsFunc, error) { summary, err := summaryProvider.Get() if err != nil { return nil, nil, err } - node, err := nodeProvider.GetNode() - if err != nil { - return nil, nil, err - } - // build the function to work against for pod stats statsFunc := cachedStatsFunc(summary.Pods) // build an evaluation context for current eviction signals @@ -714,8 +709,12 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, nodeProvider } } } - if memoryAllocatableCapacity, ok := node.Status.Allocatable[v1.ResourceMemory]; ok { - memoryAllocatableAvailable := memoryAllocatableCapacity.Copy() + + nodeCapacity := capacityProvider.GetCapacity() + allocatableReservation := capacityProvider.GetNodeAllocatableReservation() + + memoryAllocatableCapacity, memoryAllocatableAvailable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceMemory) + if exist { for _, pod := range summary.Pods { mu, err := podMemoryUsage(pod) if err == nil { @@ -724,12 +723,12 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, nodeProvider } result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{ available: memoryAllocatableAvailable, - capacity: memoryAllocatableCapacity.Copy(), + capacity: memoryAllocatableCapacity, } } - if storageScratchAllocatableCapacity, ok := node.Status.Allocatable[v1.ResourceStorage]; ok { - storageScratchAllocatable := storageScratchAllocatableCapacity.Copy() + storageScratchCapacity, storageScratchAllocatable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceStorageScratch) + if exist { for _, pod := range pods { podStat, ok := statsFunc(pod) if !ok { 
@@ -754,13 +753,25 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, nodeProvider } result[evictionapi.SignalAllocatableNodeFsAvailable] = signalObservation{ available: storageScratchAllocatable, - capacity: storageScratchAllocatableCapacity.Copy(), + capacity: storageScratchCapacity, } } return result, statsFunc, nil } +func getResourceAllocatable(capacity v1.ResourceList, reservation v1.ResourceList, resourceName v1.ResourceName) (*resource.Quantity, *resource.Quantity, bool) { + if capacity, ok := capacity[resourceName]; ok { + allocate := capacity.Copy() + if reserved, exists := reservation[resourceName]; exists { + allocate.Sub(reserved) + } + return capacity.Copy(), allocate, true + } + glog.Errorf("Could not find capacity information for resource %v", resourceName) + return nil, nil, false +} + // thresholdsMet returns the set of thresholds that were met independent of grace period func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObservations, enforceMinReclaim bool) []evictionapi.Threshold { results := []evictionapi.Threshold{} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go index fda1d010e1e5..a2dc238f4d14 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/helpers_test.go @@ -782,12 +782,12 @@ func TestMakeSignalObservations(t *testing.T) { fakeStats.Pods = append(fakeStats.Pods, newPodStats(pod, containerWorkingSetBytes)) } res := quantityMustParse("5Gi") - nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *res}) + capacityProvider := newMockCapacityProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("5Gi")}, v1.ResourceList{v1.ResourceMemory: *quantityMustParse("0Gi")}) // Allocatable thresholds are always 100%. Verify that Threshold == Capacity. 
if res.CmpInt64(int64(allocatableMemoryCapacity)) != 0 { t.Errorf("Expected Threshold %v to be equal to value %v", res.Value(), allocatableMemoryCapacity) } - actualObservations, statsFunc, err := makeSignalObservations(provider, nodeProvider, pods, false) + actualObservations, statsFunc, err := makeSignalObservations(provider, capacityProvider, pods, false) if err != nil { t.Errorf("Unexpected err: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go index 8f986eb0d1e4..00d798c22c91 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/eviction/types.go @@ -53,7 +53,7 @@ type Config struct { // Manager evaluates when an eviction threshold for node stability has been met on the node. type Manager interface { // Start starts the control loop to monitor eviction thresholds at specified interval. - Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, nodeProvider NodeProvider, monitoringInterval time.Duration) + Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, capacityProvider CapacityProvider, monitoringInterval time.Duration) // IsUnderMemoryPressure returns true if the node is under memory pressure. IsUnderMemoryPressure() bool @@ -68,10 +68,12 @@ type DiskInfoProvider interface { HasDedicatedImageFs() (bool, error) } -// NodeProvider is responsible for providing the node api object describing this node -type NodeProvider interface { - // GetNode returns the node info for this node - GetNode() (*v1.Node, error) +// CapacityProvider is responsible for providing the resource capacity and reservation information +type CapacityProvider interface { + // GetCapacity returns the amount of compute resources tracked by container manager available on the node. 
+ GetCapacity() v1.ResourceList + // GetNodeAllocatable returns the amount of compute resources that have to be reserved from scheduling. + GetNodeAllocatableReservation() v1.ResourceList } // ImageGC is responsible for performing garbage collection of unused images. diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 0dc725cf2026..0bebe417ffcc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -1255,7 +1255,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { glog.Fatalf("Failed to start cAdvisor %v", err) } // eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs - kl.evictionManager.Start(kl.cadvisor, kl.GetActivePods, kl.podResourcesAreReclaimed, kl, evictionMonitoringPeriod) + kl.evictionManager.Start(kl.cadvisor, kl.GetActivePods, kl.podResourcesAreReclaimed, kl.containerManager, evictionMonitoringPeriod) } // Run starts the kubelet reacting to config updates diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go index e0fef3c95b5c..f81cacead5d0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go @@ -156,7 +156,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h hostPath = filepath.Join(hostPath, mount.SubPath) - if subPathExists, err := util.FileExists(hostPath); err != nil { + if subPathExists, err := util.FileOrSymlinkExists(hostPath); err != nil { glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) } else if !subPathExists { // Create the sub path now because if it's auto-created later when referenced, it may have an diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go 
b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go index 7a4526c0b10e..4f9513d4b570 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go @@ -280,16 +280,14 @@ func parseCRILog(log []byte, msg *logMessage) error { return nil } -// dockerJSONLog is the JSON log buffer used in parseDockerJSONLog. -var dockerJSONLog = &jsonlog.JSONLog{} - // parseDockerJSONLog parses logs in Docker JSON log format. Docker JSON log format // example: // {"log":"content 1","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"} // {"log":"content 2","stream":"stderr","time":"2016-10-20T18:39:20.57606444Z"} func parseDockerJSONLog(log []byte, msg *logMessage) error { - dockerJSONLog.Reset() - l := dockerJSONLog + var l = &jsonlog.JSONLog{} + l.Reset() + // TODO: JSON decoding is fairly expensive, we should evaluate this. if err := json.Unmarshal(log, l); err != nil { return fmt.Errorf("failed with %v to unmarshal log %q", err, l) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD index e907644f6133..6ad5cae0e5ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD @@ -17,7 +17,6 @@ go_library( ], tags = ["automanaged"], deps = [ - "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go index 6c6c980c3d8b..8e447f0ef85b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - 
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -330,41 +329,6 @@ func (g *GenericPLEG) cacheEnabled() bool { return g.cache != nil } -// Preserve an older cached status' pod IP if the new status has no pod IP -// and its sandboxes have exited -func (g *GenericPLEG) getPodIP(pid types.UID, status *kubecontainer.PodStatus) string { - if status.IP != "" { - return status.IP - } - - oldStatus, err := g.cache.Get(pid) - if err != nil || oldStatus.IP == "" { - return "" - } - - for _, sandboxStatus := range status.SandboxStatuses { - // If at least one sandbox is ready, then use this status update's pod IP - if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { - return status.IP - } - } - - if len(status.SandboxStatuses) == 0 { - // Without sandboxes (which built-in runtimes like rkt don't report) - // look at all the container statuses, and if any containers are - // running then use the new pod IP - for _, containerStatus := range status.ContainerStatuses { - if containerStatus.State == kubecontainer.ContainerStateCreated || containerStatus.State == kubecontainer.ContainerStateRunning { - return status.IP - } - } - } - - // For pods with no ready containers or sandboxes (like exited pods) - // use the old status' pod IP - return oldStatus.IP -} - func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { if pod == nil { // The pod is missing in the current relist. This means that @@ -379,14 +343,6 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { // all containers again. status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err) - if err == nil { - // Preserve the pod IP across cache updates if the new IP is empty. 
- // When a pod is torn down, kubelet may race with PLEG and retrieve - // a pod status after network teardown, but the kubernetes API expects - // the completed pod's IP to be available after the pod is dead. - status.IP = g.getPodIP(pid, status) - } - g.cache.Set(pod.ID, status, err, timestamp) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go index 468f98bffaee..f5fd5635ca29 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go @@ -496,58 +496,3 @@ func TestRelistingWithSandboxes(t *testing.T) { actual = getEventsFromChannel(ch) verifyEvents(t, expected, actual) } - -func TestRelistIPChange(t *testing.T) { - pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock() - ch := pleg.Watch() - - id := types.UID("test-pod-0") - cState := kubecontainer.ContainerStateRunning - container := createTestContainer("c0", cState) - pod := &kubecontainer.Pod{ - ID: id, - Containers: []*kubecontainer.Container{container}, - } - ipAddr := "192.168.1.5/24" - status := &kubecontainer.PodStatus{ - ID: id, - IP: ipAddr, - ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}}, - } - event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID} - - runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once() - runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once() - - pleg.relist() - actualEvents := getEventsFromChannel(ch) - actualStatus, actualErr := pleg.cache.Get(pod.ID) - assert.Equal(t, status, actualStatus, "test0") - assert.Nil(t, actualErr, "test0") - assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents) - - // Clear the IP address and mark the container terminated - container = createTestContainer("c0", kubecontainer.ContainerStateExited) - pod = &kubecontainer.Pod{ - ID: id, - Containers: 
[]*kubecontainer.Container{container}, - } - status = &kubecontainer.PodStatus{ - ID: id, - ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}}, - } - event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID} - runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once() - runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once() - - pleg.relist() - actualEvents = getEventsFromChannel(ch) - actualStatus, actualErr = pleg.cache.Get(pod.ID) - // Must copy status to compare since its pointer gets passed through all - // the way to the event - statusCopy := *status - statusCopy.IP = ipAddr - assert.Equal(t, &statusCopy, actualStatus, "test0") - assert.Nil(t, actualErr, "test0") - assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index 37a94b06ac58..9cff34a208bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -46,14 +46,10 @@ const maxProbeRetries = 3 // Prober helps to check the liveness/readiness of a container. type prober struct { - exec execprobe.ExecProber - // probe types needs different httprobe instances so they don't - // share a connection pool which can cause collsions to the - // same host:port and transient failures. See #49740. 
- readinessHttp httprobe.HTTPProber - livenessHttp httprobe.HTTPProber - tcp tcprobe.TCPProber - runner kubecontainer.ContainerCommandRunner + exec execprobe.ExecProber + http httprobe.HTTPProber + tcp tcprobe.TCPProber + runner kubecontainer.ContainerCommandRunner refManager *kubecontainer.RefManager recorder record.EventRecorder @@ -67,13 +63,12 @@ func newProber( recorder record.EventRecorder) *prober { return &prober{ - exec: execprobe.New(), - readinessHttp: httprobe.New(), - livenessHttp: httprobe.New(), - tcp: tcprobe.New(), - runner: runner, - refManager: refManager, - recorder: recorder, + exec: execprobe.New(), + http: httprobe.New(), + tcp: tcprobe.New(), + runner: runner, + refManager: refManager, + recorder: recorder, } } @@ -95,7 +90,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c return results.Success, nil } - result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) + result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries) if err != nil || result != probe.Success { // Probe failed in one way or another. ref, hasRef := pb.refManager.GetRef(containerID) @@ -121,12 +116,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. 
-func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { +func (pb *prober) runProbeWithRetries(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { var err error var result probe.Result var output string for i := 0; i < retries; i++ { - result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID) + result, output, err = pb.runProbe(p, pod, status, container, containerID) if err == nil { return result, output, nil } @@ -144,7 +139,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header { return headers } -func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { +func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) @@ -166,11 +161,7 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status url := formatURL(scheme, host, port, path) headers := buildHeader(p.HTTPGet.HTTPHeaders) glog.V(4).Infof("HTTP-Probe Headers: %v", headers) - if probeType == liveness { - return pb.livenessHttp.Probe(url, headers, timeout) - } else { // readiness - return pb.readinessHttp.Probe(url, headers, timeout) - } + return pb.http.Probe(url, headers, timeout) } if p.TCPSocket != nil { port, err := extractPort(p.TCPSocket.Port, container) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD index 
c823ac4f80d5..8a44d8179710 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/BUILD @@ -26,6 +26,7 @@ go_library( "//pkg/kubelet/server/remotecommand:go_default_library", "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/server/streaming:go_default_library", + "//pkg/kubelet/types:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/limitwriter:go_default_library", "//pkg/volume:go_default_library", @@ -33,7 +34,9 @@ go_library( "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/master/thirdparty/tprregistration_controller.go b/vendor/k8s.io/kubernetes/pkg/master/thirdparty/tprregistration_controller.go index 6e0bcc3fac51..432c985edcaa 100644 --- a/vendor/k8s.io/kubernetes/pkg/master/thirdparty/tprregistration_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/master/thirdparty/tprregistration_controller.go @@ -59,6 +59,8 @@ type tprRegistrationController struct { syncHandler func(groupVersion schema.GroupVersion) error + syncedInitialSet chan struct{} + // queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors // this is actually keyed by a groupVersion queue workqueue.RateLimitingInterface @@ -75,7 +77,8 @@ func NewAutoRegistrationController(tprInformer informers.ThirdPartyResourceInfor crdLister: crdinformer.Lister(), 
crdSynced: crdinformer.Informer().HasSynced, apiServiceRegistration: apiServiceRegistration, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "tpr-autoregister"), + syncedInitialSet: make(chan struct{}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "tpr-autoregister"), } c.syncHandler = c.handleVersionUpdate @@ -145,10 +148,39 @@ func (c *tprRegistrationController) Run(threadiness int, stopCh <-chan struct{}) defer glog.Infof("Shutting down tpr-autoregister controller") // wait for your secondary caches to fill before starting your work - if !controller.WaitForCacheSync("tpr-autoregister", stopCh, c.tprSynced) { + if !controller.WaitForCacheSync("tpr-autoregister", stopCh, c.tprSynced, c.crdSynced) { return } + // process each tpr in the list once + if tprs, err := c.tprLister.List(labels.Everything()); err != nil { + utilruntime.HandleError(err) + } else { + for _, tpr := range tprs { + _, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(tpr) + if err != nil { + utilruntime.HandleError(err) + continue + } + for _, version := range tpr.Versions { + if err := c.syncHandler(schema.GroupVersion{Group: group, Version: version.Name}); err != nil { + utilruntime.HandleError(err) + } + } + } + } + // process each item in the list once + if crds, err := c.crdLister.List(labels.Everything()); err != nil { + utilruntime.HandleError(err) + } else { + for _, crd := range crds { + if err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}); err != nil { + utilruntime.HandleError(err) + } + } + } + close(c.syncedInitialSet) + // start up your worker threads based on threadiness. Some controllers have multiple kinds of workers for i := 0; i < threadiness; i++ { // runWorker will loop until "something bad" happens. 
The .Until will then rekick the worker @@ -160,6 +192,11 @@ func (c *tprRegistrationController) Run(threadiness int, stopCh <-chan struct{}) <-stopCh } +// WaitForInitialSync blocks until the initial set of CRD resources has been processed +func (c *tprRegistrationController) WaitForInitialSync() { + <-c.syncedInitialSet +} + func (c *tprRegistrationController) runWorker() { // hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work // available, so we don't worry about secondary waits diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go index 38ce792d4bff..0bf823370850 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go @@ -2097,15 +2097,10 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett } } - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Core().Events(namespace).Search(api.Scheme, serviceAccount) - } - - return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) + return describeServiceAccount(serviceAccount, tokens, missingSecrets) } -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) { +func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name) @@ -2157,10 +2152,6 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec w.WriteLine() } - if events != nil { - DescribeEvents(events, w) - } - return nil }) } diff --git 
a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go index f49d8b849c71..8e7d570bcb0e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration" + "k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for pod disruption budgets against etcd @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { }, PredicateFunc: externaladmissionhookconfiguration.MatchExternalAdmissionHookConfiguration, DefaultQualifiedResource: admissionregistration.Resource("externaladmissionhookconfigurations"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("externaladmissionhookconfigurations"), CreateStrategy: externaladmissionhookconfiguration.Strategy, UpdateStrategy: externaladmissionhookconfiguration.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go index 5e7ec678ee9d..becea1ae56be 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration" + 
"k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for pod disruption budgets against etcd @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { }, PredicateFunc: initializerconfiguration.MatchInitializerConfiguration, DefaultQualifiedResource: admissionregistration.Resource("initializerconfigurations"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("initializerconfigurations"), CreateStrategy: initializerconfiguration.Strategy, UpdateStrategy: initializerconfiguration.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go index 06d68c0c8240..556d5ef1be5c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/registry/apps/controllerrevision" + "k8s.io/kubernetes/pkg/registry/cachesize" ) // REST implements a RESTStorage for ControllerRevision @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &apps.ControllerRevisionList{} }, PredicateFunc: controllerrevision.MatchControllerRevision, DefaultQualifiedResource: apps.Resource("controllerrevisions"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("controllerrevisions"), CreateStrategy: controllerrevision.Strategy, UpdateStrategy: controllerrevision.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go index 45fab6865147..d40f1d14c5e3 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go @@ 
-26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" appsapi "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/registry/apps/statefulset" + "k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for replication controllers against etcd @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &appsapi.StatefulSetList{} }, PredicateFunc: statefulset.MatchStatefulSet, DefaultQualifiedResource: appsapi.Resource("statefulsets"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("statefulsets"), CreateStrategy: statefulset.Strategy, UpdateStrategy: statefulset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go index 3fee2e0a6bd7..3421192cd074 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler" + "k8s.io/kubernetes/pkg/registry/cachesize" ) type REST struct { @@ -40,6 +41,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscalerList{} }, PredicateFunc: horizontalpodautoscaler.MatchAutoscaler, DefaultQualifiedResource: autoscaling.Resource("horizontalpodautoscalers"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("horizontalpodautoscalers"), CreateStrategy: horizontalpodautoscaler.Strategy, UpdateStrategy: horizontalpodautoscaler.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go index 085e596486bd..cdc7cac6516e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/registry/batch/cronjob" + "k8s.io/kubernetes/pkg/registry/cachesize" ) // REST implements a RESTStorage for scheduled jobs against etcd @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &batch.CronJobList{} }, PredicateFunc: cronjob.MatchCronJob, DefaultQualifiedResource: batch.Resource("cronjobs"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("cronjobs"), CreateStrategy: cronjob.Strategy, UpdateStrategy: cronjob.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go index 5fda289ecbd8..aa52d251023c 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/registry/batch/job" + "k8s.io/kubernetes/pkg/registry/cachesize" ) // JobStorage includes dummy storage for Job. 
@@ -56,6 +57,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &batch.JobList{} }, PredicateFunc: job.MatchJob, DefaultQualifiedResource: batch.Resource("jobs"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("jobs"), CreateStrategy: job.Strategy, UpdateStrategy: job.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go index 6babf5080dce..b853cb5e6a8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go @@ -14,15 +14,65 @@ See the License for the specific language governing permissions and limitations under the License. */ +//use for --watch-cache-sizes param of kube-apiserver +//make watch cache size of resources configurable package cachesize import ( - "k8s.io/apimachinery/pkg/runtime/schema" + "strconv" + "strings" + + "github.com/golang/glog" +) + +type Resource string + +const ( + APIServices Resource = "apiservices" + CertificateSigningRequests Resource = "certificatesigningrequests" + ClusterRoles Resource = "clusterroles" + ClusterRoleBindings Resource = "clusterrolebindings" + ConfigMaps Resource = "configmaps" + Controllers Resource = "controllers" + Daemonsets Resource = "daemonsets" + Deployments Resource = "deployments" + Endpoints Resource = "endpoints" + HorizontalPodAutoscalers Resource = "horizontalpodautoscalers" + Ingress Resource = "ingress" + PodDisruptionBudget Resource = "poddisruptionbudgets" + StatefulSet Resource = "statefulset" + Jobs Resource = "jobs" + LimitRanges Resource = "limitranges" + Namespaces Resource = "namespaces" + NetworkPolicys Resource = "networkpolicies" + Nodes Resource = "nodes" + PersistentVolumes Resource = "persistentvolumes" + PersistentVolumeClaims Resource = "persistentvolumeclaims" + Pods Resource = "pods" + PodSecurityPolicies Resource = 
"podsecuritypolicies" + PodTemplates Resource = "podtemplates" + Replicasets Resource = "replicasets" + ResourceQuotas Resource = "resourcequotas" + CronJobs Resource = "cronjobs" + Roles Resource = "roles" + RoleBindings Resource = "rolebindings" + Secrets Resource = "secrets" + ServiceAccounts Resource = "serviceaccounts" + Services Resource = "services" + StorageClasses Resource = "storageclasses" ) -// NewHeuristicWatchCacheSizes returns a map of suggested watch cache sizes based on total -// memory. -func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupResource]int { +// TODO: This shouldn't be a global variable. +var watchCacheSizes map[Resource]int + +func init() { + watchCacheSizes = make(map[Resource]int) +} + +func InitializeWatchCacheSizes(expectedRAMCapacityMB int) { + // This is the heuristics that from memory capacity is trying to infer + // the maximum number of nodes in the cluster and set cache sizes based + // on that value. // From our documentation, we officially recommend 120GB machines for // 2000 nodes, and we scale from that point. Thus we assume ~60MB of // capacity per node. @@ -33,14 +83,39 @@ func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupReso // is supposed to have non-default value. // // TODO: Figure out which resource we should have non-default value. 
- watchCacheSizes := make(map[schema.GroupResource]int) - watchCacheSizes[schema.GroupResource{Resource: "replicationcontrollers"}] = maxInt(5*clusterSize, 100) - watchCacheSizes[schema.GroupResource{Resource: "endpoints"}] = maxInt(10*clusterSize, 1000) - watchCacheSizes[schema.GroupResource{Resource: "nodes"}] = maxInt(5*clusterSize, 1000) - watchCacheSizes[schema.GroupResource{Resource: "pods"}] = maxInt(50*clusterSize, 1000) - watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000) - watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000) - return watchCacheSizes + watchCacheSizes[Controllers] = maxInt(5*clusterSize, 100) + watchCacheSizes[Endpoints] = maxInt(10*clusterSize, 1000) + watchCacheSizes[Nodes] = maxInt(5*clusterSize, 1000) + watchCacheSizes[Pods] = maxInt(50*clusterSize, 1000) + watchCacheSizes[Services] = maxInt(5*clusterSize, 1000) + watchCacheSizes[APIServices] = maxInt(5*clusterSize, 1000) +} + +func SetWatchCacheSizes(cacheSizes []string) { + for _, c := range cacheSizes { + tokens := strings.Split(c, "#") + if len(tokens) != 2 { + glog.Errorf("invalid value of watch cache capabilities: %s", c) + continue + } + + size, err := strconv.Atoi(tokens[1]) + if err != nil { + glog.Errorf("invalid size of watch cache capabilities: %s", c) + continue + } + + watchCacheSizes[Resource(strings.ToLower(tokens[0]))] = size + } +} + +// GetWatchCacheSizeByResource returns the configured watch cache size for the given resource. +// A nil value means to use a default size, zero means to disable caching. 
+func GetWatchCacheSizeByResource(resource string) (ret *int) { // TODO this should use schema.GroupResource for lookups + if value, found := watchCacheSizes[Resource(resource)]; found { + return &value + } + return nil } func maxInt(a, b int) int { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go index 2f6cfdab0fc9..2da56ee106fb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go @@ -24,6 +24,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/certificates" + "k8s.io/kubernetes/pkg/registry/cachesize" csrregistry "k8s.io/kubernetes/pkg/registry/certificates/certificates" ) @@ -40,6 +41,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Approva NewListFunc: func() runtime.Object { return &certificates.CertificateSigningRequestList{} }, PredicateFunc: csrregistry.Matcher, DefaultQualifiedResource: certificates.Resource("certificatesigningrequests"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("certificatesigningrequests"), CreateStrategy: csrregistry.Strategy, UpdateStrategy: csrregistry.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go index a9ebbfa4da1f..8aed341b7c0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/configmap" ) @@ -38,6 +39,7 @@ 
func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.ConfigMapList{} }, PredicateFunc: configmap.MatchConfigMap, DefaultQualifiedResource: api.Resource("configmaps"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("configmaps"), CreateStrategy: configmap.Strategy, UpdateStrategy: configmap.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go index fd38ad5a7281..44abfc437a81 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/endpoint" ) @@ -37,6 +38,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.EndpointsList{} }, PredicateFunc: endpoint.MatchEndpoints, DefaultQualifiedResource: api.Resource("endpoints"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("endpoints"), CreateStrategy: endpoint.Strategy, UpdateStrategy: endpoint.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go index 44507d307b99..abc030be1fba 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/event" ) @@ -50,6 +51,7 @@ func NewREST(optsGetter 
generic.RESTOptionsGetter, ttl uint64) *REST { return ttl, nil }, DefaultQualifiedResource: resource, + WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: event.Strategy, UpdateStrategy: event.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go index 7cf050264708..ba29960e3624 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/limitrange" ) @@ -37,6 +38,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.LimitRangeList{} }, PredicateFunc: limitrange.MatchLimitRange, DefaultQualifiedResource: api.Resource("limitranges"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("limitranges"), CreateStrategy: limitrange.Strategy, UpdateStrategy: limitrange.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go index da389c698d94..0b3edf132d5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go @@ -31,6 +31,7 @@ import ( "k8s.io/apiserver/pkg/storage" storageerr "k8s.io/apiserver/pkg/storage/errors" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/namespace" ) @@ -58,6 +59,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Finaliz NewListFunc: func() runtime.Object { return &api.NamespaceList{} }, 
PredicateFunc: namespace.MatchNamespace, DefaultQualifiedResource: api.Resource("namespaces"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("namespaces"), CreateStrategy: namespace.Strategy, UpdateStrategy: namespace.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go index adea1b4b1bed..627ec716ccb8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/rest/proxy.go @@ -70,7 +70,7 @@ func (r *ProxyREST) Connect(ctx genericapirequest.Context, id string, opts runti if err != nil { return nil, err } - location.Path = path.Join(location.Path, proxyOpts.Path) + location.Path = path.Join("/", location.Path, proxyOpts.Path) // Return a proxy handler that uses the desired transport, wrapped with additional proxy handling (to get URL rewriting, X-Forwarded-* headers, etc) return newThrottledUpgradeAwareProxyHandler(location, transport, true, false, responder), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go index e894bb5051de..dc5be3dc7051 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/client" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/node" noderest "k8s.io/kubernetes/pkg/registry/core/node/rest" ) @@ -76,6 +77,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, kubeletClientConfig client NewListFunc: func() runtime.Object { return &api.NodeList{} }, PredicateFunc: node.MatchNode, DefaultQualifiedResource: api.Resource("nodes"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("nodes"), CreateStrategy: node.Strategy, 
UpdateStrategy: node.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go index a24aa391039d..7c69f7eb8129 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go @@ -24,6 +24,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/persistentvolume" ) @@ -39,6 +40,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.PersistentVolumeList{} }, PredicateFunc: persistentvolume.MatchPersistentVolumes, DefaultQualifiedResource: api.Resource("persistentvolumes"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("persistentvolumes"), CreateStrategy: persistentvolume.Strategy, UpdateStrategy: persistentvolume.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go index 78c6a1e04111..d5295dbf631b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go @@ -24,6 +24,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim" ) @@ -39,6 +40,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.PersistentVolumeClaimList{} }, PredicateFunc: 
persistentvolumeclaim.MatchPersistentVolumeClaim, DefaultQualifiedResource: api.Resource("persistentvolumeclaims"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("persistentvolumeclaims"), CreateStrategy: persistentvolumeclaim.Strategy, UpdateStrategy: persistentvolumeclaim.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go index e7fae10cd734..3b54089a4604 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/rest/subresources.go @@ -69,7 +69,7 @@ func (r *ProxyREST) Connect(ctx genericapirequest.Context, id string, opts runti if err != nil { return nil, err } - location.Path = path.Join(location.Path, proxyOpts.Path) + location.Path = path.Join("/", location.Path, proxyOpts.Path) // Return a proxy handler that uses the desired transport, wrapped with additional proxy handling (to get URL rewriting, X-Forwarded-* headers, etc) return newThrottledUpgradeAwareProxyHandler(location, transport, true, false, false, responder), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go index 6cb38101d216..83b20d588ef5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/pod" podrest "k8s.io/kubernetes/pkg/registry/core/pod/rest" ) @@ -70,6 +71,7 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, k client.ConnectionInfoGet NewListFunc: func() runtime.Object { return &api.PodList{} }, PredicateFunc: pod.MatchPod, 
DefaultQualifiedResource: api.Resource("pods"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("pods"), CreateStrategy: pod.Strategy, UpdateStrategy: pod.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go index cee6f0d71a4b..13d1d4365945 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go @@ -21,6 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/podtemplate" ) @@ -36,6 +37,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.PodTemplateList{} }, PredicateFunc: podtemplate.MatchPodTemplate, DefaultQualifiedResource: api.Resource("podtemplates"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podtemplates"), CreateStrategy: podtemplate.Strategy, UpdateStrategy: podtemplate.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go index c9fce60d8c8a..1d0bb1e76e98 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling/validation" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/replicationcontroller" ) @@ -65,6 +66,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return 
&api.ReplicationControllerList{} }, PredicateFunc: replicationcontroller.MatchController, DefaultQualifiedResource: api.Resource("replicationcontrollers"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("replicationcontrollers"), CreateStrategy: replicationcontroller.Strategy, UpdateStrategy: replicationcontroller.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go index d5d46241cf71..90638de8a6fc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go @@ -24,6 +24,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/resourcequota" ) @@ -39,6 +40,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.ResourceQuotaList{} }, PredicateFunc: resourcequota.MatchResourceQuota, DefaultQualifiedResource: api.Resource("resourcequotas"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("resourcequotas"), CreateStrategy: resourcequota.Strategy, UpdateStrategy: resourcequota.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go index fe823f7aad97..8dc5592eed0f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go @@ -21,6 +21,7 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/secret" ) @@ -36,6 +37,7 
@@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.SecretList{} }, PredicateFunc: secret.Matcher, DefaultQualifiedResource: api.Resource("secrets"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("secrets"), CreateStrategy: secret.Strategy, UpdateStrategy: secret.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go index 7f5f503a396c..e4fc0d4e4d9a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/proxy.go @@ -66,7 +66,7 @@ func (r *ProxyREST) Connect(ctx genericapirequest.Context, id string, opts runti if err != nil { return nil, err } - location.Path = path.Join(location.Path, proxyOpts.Path) + location.Path = path.Join("/", location.Path, proxyOpts.Path) // Return a proxy handler that uses the desired transport, wrapped with additional proxy handling (to get URL rewriting, X-Forwarded-* headers, etc) return newThrottledUpgradeAwareProxyHandler(location, transport, true, false, responder), nil } diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go index 5726ac270d8e..6192b2842c8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go @@ -24,6 +24,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/service" ) @@ -39,6 +40,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.ServiceList{} }, PredicateFunc: service.MatchServices, DefaultQualifiedResource: api.Resource("services"), + 
WatchCacheSize: cachesize.GetWatchCacheSizeByResource("services"), CreateStrategy: service.Strategy, UpdateStrategy: service.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go index 58133a41eb9b..3c9b1d26e94e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/serviceaccount" ) @@ -37,6 +38,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.ServiceAccountList{} }, PredicateFunc: serviceaccount.Matcher, DefaultQualifiedResource: api.Resource("serviceaccounts"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("serviceaccounts"), CreateStrategy: serviceaccount.Strategy, UpdateStrategy: serviceaccount.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go index 28cdde76a747..86e7e45c2fb1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go @@ -25,6 +25,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/daemonset" ) @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &extensions.DaemonSetList{} }, PredicateFunc: daemonset.MatchDaemonSet, 
DefaultQualifiedResource: extensions.Resource("daemonsets"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("daemonsets"), CreateStrategy: daemonset.Strategy, UpdateStrategy: daemonset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go index 133fba16d810..d747ecad8af6 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/deployment" ) @@ -67,6 +68,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Rollbac NewListFunc: func() runtime.Object { return &extensions.DeploymentList{} }, PredicateFunc: deployment.MatchDeployment, DefaultQualifiedResource: extensions.Resource("deployments"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("deployments"), CreateStrategy: deployment.Strategy, UpdateStrategy: deployment.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go index 55c8befe93bb..8b891a6209ad 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go @@ -25,6 +25,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/ingress" ) @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() 
runtime.Object { return &extensions.IngressList{} }, PredicateFunc: ingress.MatchIngress, DefaultQualifiedResource: extensions.Resource("ingresses"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("ingresses"), CreateStrategy: ingress.Strategy, UpdateStrategy: ingress.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go index a6d484b99f51..8bb412a4a0dc 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" extensionsapi "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/networkpolicy" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensionsapi.NetworkPolicyList{} }, PredicateFunc: networkpolicy.MatchNetworkPolicy, DefaultQualifiedResource: extensionsapi.Resource("networkpolicies"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("networkpolicies"), CreateStrategy: networkpolicy.Strategy, UpdateStrategy: networkpolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go index 699bb0f9a14f..eff8d11b76e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + 
"k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensions.PodSecurityPolicyList{} }, PredicateFunc: podsecuritypolicy.MatchPodSecurityPolicy, DefaultQualifiedResource: extensions.Resource("podsecuritypolicies"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podsecuritypolicies"), CreateStrategy: podsecuritypolicy.Strategy, UpdateStrategy: podsecuritypolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go index 90314a840219..b10c455ae911 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/replicaset" ) @@ -64,6 +65,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &extensions.ReplicaSetList{} }, PredicateFunc: replicaset.MatchReplicaSet, DefaultQualifiedResource: extensions.Resource("replicasets"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("replicasets"), CreateStrategy: replicaset.Strategy, UpdateStrategy: replicaset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go index 1910e5f02103..120cfdc82061 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go +++ 
b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource" ) @@ -47,6 +48,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceList{} }, PredicateFunc: thirdpartyresource.Matcher, DefaultQualifiedResource: resource, + WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: thirdpartyresource.Strategy, UpdateStrategy: thirdpartyresource.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go index e89477a33d5a..ccf18ecb2630 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go @@ -30,6 +30,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" ) @@ -103,6 +104,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter, group, kind string) *REST { NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceDataList{} }, PredicateFunc: thirdpartyresourcedata.Matcher, DefaultQualifiedResource: resource, + WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: thirdpartyresourcedata.Strategy, UpdateStrategy: thirdpartyresourcedata.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go index 48823519c2d5..e29b501b547d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" networkingapi "k8s.io/kubernetes/pkg/apis/networking" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/networking/networkpolicy" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &networkingapi.NetworkPolicyList{} }, PredicateFunc: networkpolicy.Matcher, DefaultQualifiedResource: networkingapi.Resource("networkpolicies"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("networkpolicies"), CreateStrategy: networkpolicy.Strategy, UpdateStrategy: networkpolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go index 0686d70893ed..2daa230365d1 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go @@ -25,6 +25,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" policyapi "k8s.io/kubernetes/pkg/apis/policy" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget" ) @@ -41,6 +42,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &policyapi.PodDisruptionBudgetList{} }, PredicateFunc: poddisruptionbudget.MatchPodDisruptionBudget, DefaultQualifiedResource: policyapi.Resource("poddisruptionbudgets"), + WatchCacheSize: 
cachesize.GetWatchCacheSizeByResource("poddisruptionbudgets"), CreateStrategy: poddisruptionbudget.Strategy, UpdateStrategy: poddisruptionbudget.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go index b83a79267f98..e434280a4098 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/clusterrole" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.ClusterRoleList{} }, PredicateFunc: clusterrole.Matcher, DefaultQualifiedResource: rbac.Resource("clusterroles"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusterroles"), CreateStrategy: clusterrole.Strategy, UpdateStrategy: clusterrole.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go index 790c34df6888..2cb178ca63f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.ClusterRoleBindingList{} }, PredicateFunc: 
clusterrolebinding.Matcher, DefaultQualifiedResource: rbac.Resource("clusterrolebindings"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusterrolebindings"), CreateStrategy: clusterrolebinding.Strategy, UpdateStrategy: clusterrolebinding.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go index 4744420d0e63..18f178672527 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/role" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.RoleList{} }, PredicateFunc: role.Matcher, DefaultQualifiedResource: rbac.Resource("roles"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("roles"), CreateStrategy: role.Strategy, UpdateStrategy: role.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go index cafd8ce47973..3ccf4f72cf6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/rolebinding" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.RoleBindingList{} }, PredicateFunc: rolebinding.Matcher, 
DefaultQualifiedResource: rbac.Resource("rolebindings"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("rolebindings"), CreateStrategy: rolebinding.Strategy, UpdateStrategy: rolebinding.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go index 78ffe711598e..4d1c7ca54b50 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go @@ -22,6 +22,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" settingsapi "k8s.io/kubernetes/pkg/apis/settings" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/settings/podpreset" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &settingsapi.PodPresetList{} }, PredicateFunc: podpreset.Matcher, DefaultQualifiedResource: settingsapi.Resource("podpresets"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podpresets"), CreateStrategy: podpreset.Strategy, UpdateStrategy: podpreset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go index 7e8d293eb3a0..3dcb18a301f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go @@ -23,6 +23,7 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" storageapi "k8s.io/kubernetes/pkg/apis/storage" + "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/storage/storageclass" ) @@ -38,6 +39,7 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return 
&storageapi.StorageClassList{} }, PredicateFunc: storageclass.MatchStorageClasses, DefaultQualifiedResource: storageapi.Resource("storageclasses"), + WatchCacheSize: cachesize.GetWatchCacheSizeByResource("storageclass"), CreateStrategy: storageclass.Strategy, UpdateStrategy: storageclass.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index 972bff26a10f..2b71fa0a7285 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -124,6 +124,14 @@ func (f *FakeMounter) List() ([]MountPoint, error) { return f.MountPoints, nil } +func (f *FakeMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (f *FakeMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(f, dir) +} + func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go index 8315f7ea3779..4bb1c1d453dd 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount.go @@ -44,8 +44,21 @@ type Interface interface { // it could change between chunked reads). This is guaranteed to be // consistent. List() ([]MountPoint, error) - // IsLikelyNotMountPoint determines if a directory is a mountpoint. + // IsMountPointMatch determines if the mountpoint matches the dir + IsMountPointMatch(mp MountPoint, dir string) bool + // IsNotMountPoint determines if a directory is a mountpoint. // It should return ErrNotExist when the directory does not exist. + // IsNotMountPoint is more expensive than IsLikelyNotMountPoint. + // IsNotMountPoint detects bind mounts in linux. 
+ // IsNotMountPoint enumerates all the mountpoints using List() and + // the list of mountpoints may be large, then it uses + // IsMountPointMatch to evaluate whether the directory is a mountpoint + IsNotMountPoint(file string) (bool, error) + // IsLikelyNotMountPoint uses heuristics to determine if a directory + // is a mountpoint. + // It should return ErrNotExist when the directory does not exist. + // IsLikelyNotMountPoint does NOT properly detect all mountpoint types + // most notably linux bind mounts. IsLikelyNotMountPoint(file string) (bool, error) // DeviceOpened determines if the device is in use elsewhere // on the system, i.e. still mounted. @@ -190,3 +203,34 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str return path.Base(mountPath), nil } + +// IsNotMountPoint determines if a directory is a mountpoint. +// It should return ErrNotExist when the directory does not exist. +// This method uses the List() of all mountpoints +// It is more extensive than IsLikelyNotMountPoint +// and it detects bind mounts in linux +func IsNotMountPoint(mounter Interface, file string) (bool, error) { + // IsLikelyNotMountPoint provides a quick check + // to determine whether file IS A mountpoint + notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file) + if notMntErr != nil { + return notMnt, notMntErr + } + // identified as mountpoint, so return this fact + if notMnt == false { + return notMnt, nil + } + // check all mountpoints since IsLikelyNotMountPoint + // is not reliable for some mountpoint types + mountPoints, mountPointsErr := mounter.List() + if mountPointsErr != nil { + return notMnt, mountPointsErr + } + for _, mp := range mountPoints { + if mounter.IsMountPointMatch(mp, file) { + notMnt = false + break + } + } + return notMnt, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index e9167facfc53..ae7709d471b6 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -235,6 +235,15 @@ func (*Mounter) List() ([]MountPoint, error) { return listProcMounts(procMountsPath) } +func (mounter *Mounter) IsMountPointMatch(mp MountPoint, dir string) bool { + deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) + return ((mp.Path == dir) || (mp.Path == deletedDir)) +} + +func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + // IsLikelyNotMountPoint determines if a directory is not a mountpoint. // It is fast but not necessarily ALWAYS correct. If the path is in fact // a bind mount from one part of a mount to another it will not be detected. @@ -242,10 +251,6 @@ func (*Mounter) List() ([]MountPoint, error) { // will return true. When in fact /tmp/b is a mount point. If this situation // if of interest to you, don't use this function... func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { - return IsNotMountPoint(file) -} - -func IsNotMountPoint(file string) (bool, error) { stat, err := os.Stat(file) if err != nil { return true, err diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go index 2119b1a0ad67..6c4000a0ee83 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go @@ -43,6 +43,14 @@ func (mounter *Mounter) List() ([]MountPoint, error) { return []MountPoint{}, nil } +func (mounter *Mounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mounter *Mounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } @@ -66,7 +74,3 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, 
func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { return true, nil } - -func IsNotMountPoint(file string) (bool, error) { - return true, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go index 2a274b9dc6d6..d237483b4295 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount.go @@ -199,6 +199,15 @@ func (*NsenterMounter) List() ([]MountPoint, error) { return listProcMounts(hostProcMountsPath) } +func (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(m, dir) +} + +func (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + deletedDir := fmt.Sprintf("%s\\040(deleted)", dir) + return ((mp.Path == dir) || (mp.Path == deletedDir)) +} + // IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt // in the host's root mount namespace. 
func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go index dcf19edefd26..e955e1b781b4 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/nsenter_mount_unsupported.go @@ -38,6 +38,14 @@ func (*NsenterMounter) List() ([]MountPoint, error) { return []MountPoint{}, nil } +func (m *NsenterMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(m, dir) +} + +func (*NsenterMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + func (*NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) { return true, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall_test.go b/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall_test.go index 938ef08e3585..a5b19fe41a20 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall_test.go +++ b/vendor/k8s.io/kubernetes/pkg/util/removeall/removeall_test.go @@ -49,6 +49,12 @@ func (mounter *fakeMounter) PathIsDevice(pathname string) (bool, error) { func (mounter *fakeMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { return "", errors.New("not implemented") } +func (mounter *fakeMounter) IsMountPointMatch(mp mount.MountPoint, dir string) bool { + return (mp.Path == dir) +} +func (mounter *fakeMounter) IsNotMountPoint(dir string) (bool, error) { + return mount.IsNotMountPoint(mounter, dir) +} func (mounter *fakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { name := path.Base(file) if strings.HasPrefix(name, "mount") { diff --git a/vendor/k8s.io/kubernetes/pkg/util/util.go b/vendor/k8s.io/kubernetes/pkg/util/util.go index 356b295a3e18..389e145e8496 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/util/util.go @@ -84,6 
+84,15 @@ func FileExists(filename string) (bool, error) { return true, nil } +func FileOrSymlinkExists(filename string) (bool, error) { + if _, err := os.Lstat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + // ReadDirNoStat returns a string of files/directories contained // in dirname without calling lstat on them. func ReadDirNoStat(dirname string) ([]string, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/kubernetes/pkg/version/base.go index 5dbc70eaf12d..311b63d01cd7 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/pkg/version/base.go @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.7.0+$Format:%h$" + gitVersion string = "v1.7.6+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD index acc86be97671..6f95f7748b66 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/BUILD @@ -12,12 +12,14 @@ go_library( name = "go_default_library", srcs = [ "attacher.go", + "azure_common.go", "azure_dd.go", + "azure_mounter.go", "azure_provision.go", - "vhd_util.go", ], tags = ["automanaged"], deps = [ + "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", @@ -27,45 +29,43 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - 
"//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], ) +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) + go_test( name = "go_default_test", srcs = [ + "azure_common_test.go", "azure_dd_test.go", - "vhd_util_test.go", ], library = ":go_default_library", tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", "//pkg/util/exec:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", ], ) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go index d491356e24f9..0b854e0f0901 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go @@ -30,51 +30,39 @@ import ( "k8s.io/apimachinery/pkg/util/wait" 
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +type azureDiskDetacher struct { + plugin *azureDataDiskPlugin + cloud *azure.Cloud +} + type azureDiskAttacher struct { - host volume.VolumeHost - azureProvider azureCloudProvider + plugin *azureDataDiskPlugin + cloud *azure.Cloud } var _ volume.Attacher = &azureDiskAttacher{} - -var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} - -const ( - checkSleepDuration = time.Second -) +var _ volume.Detacher = &azureDiskDetacher{} // acquire lock to get an lun number var getLunMutex = keymutex.NewKeyMutex() -// NewAttacher initializes an Attacher -func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - - return &azureDiskAttacher{ - host: plugin.host, - azureProvider: azure, - }, nil -} - // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN -func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { +func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { glog.Warningf("failed to get azure disk spec") return "", err } - instanceid, err := attacher.azureProvider.InstanceID(nodeName) + + instanceid, err := a.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("failed to get azure instance id") return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName) @@ -83,7 +71,12 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, 
nodeName types.Node instanceid = instanceid[(ind + 1):] } - lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return "", err + } + + lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) if err == cloudprovider.InstanceNotFound { // Log error and continue with attach glog.Warningf( @@ -99,13 +92,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) - lun, err = attacher.azureProvider.GetNextDiskLun(nodeName) + lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { glog.Warningf("no LUN available for instance %q", nodeName) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid) } glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) - err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { @@ -117,14 +111,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node return strconv.Itoa(int(lun)), err } -func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { +func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { volumesAttachedCheck := 
make(map[*volume.Spec]bool) volumeSpecMap := make(map[string]*volume.Spec) volumeIDList := []string{} for _, spec := range specs { volumeSource, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -132,11 +126,16 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node volumesAttachedCheck[spec] = true volumeSpecMap[volumeSource.DiskName] = spec } - attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName) + + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return nil, err + } + attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName) if err != nil { // Log error and continue with attach glog.Errorf( - "Error checking if volumes (%v) are attached to current node (%q). err=%v", + "azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err } @@ -145,71 +144,85 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil } // WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. 
If attached, the device path is returned -func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, _ *v1.Pod, timeout time.Duration) (string, error) { +func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { + var err error + lun, err := strconv.Atoi(devicePath) + if err != nil { + return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath) + } + volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - if len(lunStr) == 0 { - return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName) - } + io := &osIOHandler{} + scsiHostRescan(io) - lun, err := strconv.Atoi(lunStr) - if err != nil { - return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err) - } - scsiHostRescan(&osIOHandler{}) - exe := exec.New() - devicePath := "" - - err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) { - glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr) - if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil { - if len(devicePath) == 0 { - glog.Warningf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - return false, fmt.Errorf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - } - glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath) + diskName := volumeSource.DiskName + nodeName := a.plugin.host.GetHostName() + newDevicePath := "" + + err = wait.Poll(1*time.Second, timeout, func() (bool, error) { + exe := exec.New() + + if newDevicePath, err = findDiskByLun(lun, io, exe); err != nil { + return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err) + } + + // did we 
find it? + if newDevicePath != "" { + // the curent sequence k8s uses for unformated disk (check-disk, mount, fail, mkfs.extX) hangs on + // Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve + // the root case on Azure. + formatIfNotFormatted(newDevicePath, *volumeSource.FSType) return true, nil - } else { - //Log error, if any, and continue checking periodically - glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err) - return false, nil } + + return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun) }) - return devicePath, err + + return newDevicePath, err } -// GetDeviceMountPath finds the volume's mount path on the node -func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { +// to avoid name conflicts (similar *.vhd name) +// we use hash diskUri and we use it as device mount target. +// this is generalized for both managed and blob disks +// we also prefix the hash with m/b based on disk kind +func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil + if volumeSource.Kind == nil { // this spec was constructed from info on the node + pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI) + return pdPath, nil + } + + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk) } -// MountDevice runs mount command on the node to mount the volume func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - mounter := attacher.host.GetMounter() + mounter := attacher.plugin.host.GetMounter() notMnt, 
err := mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - return err + return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err) } notMnt = true } else { - return err + return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err) } } @@ -219,47 +232,27 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str } options := []string{} - if spec.ReadOnly { - options = append(options, "ro") - } if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()} mountOptions := volume.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions) if err != nil { - os.Remove(deviceMountPath) - return err + if cleanErr := os.Remove(deviceMountPath); cleanErr != nil { + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr) + } + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err) } } return nil } -type azureDiskDetacher struct { - mounter mount.Interface - azureProvider azureCloudProvider -} - -var _ volume.Detacher = &azureDiskDetacher{} - -// NewDetacher initializes a volume Detacher -func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &azureDiskDetacher{ - mounter: plugin.host.GetMounter(), - azureProvider: azure, - }, nil -} - // Detach detaches disk from Azure VM. 
-func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error { - if diskName == "" { - return fmt.Errorf("invalid disk to detach: %q", diskName) +func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error { + if diskURI == "" { + return fmt.Errorf("invalid disk to detach: %q", diskURI) } - instanceid, err := detacher.azureProvider.InstanceID(nodeName) + + instanceid, err := d.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("no instance id for node %q, skip detaching", nodeName) return nil @@ -268,22 +261,28 @@ func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeNa instanceid = instanceid[(ind + 1):] } - glog.V(4).Infof("detach %v from node %q", diskName, nodeName) - err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName) + glog.V(4).Infof("detach %v from node %q", diskURI, nodeName) + + diskController, err := getDiskController(d.plugin.host) if err != nil { - glog.Errorf("failed to detach azure disk %q, err %v", diskName, err) + return err + } + err = diskController.DetachDiskByName("", diskURI, nodeName) + if err != nil { + glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } + glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) return err } // UnmountDevice unmounts the volume on the node func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { - volume := path.Base(deviceMountPath) - if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil { - glog.Errorf("Error unmounting %q: %v", volume, err) - return err + err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter()) + if err == nil { + glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { - return nil + glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) } + return err } diff --git 
a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go new file mode 100644 index 000000000000..485284d7042d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common.go @@ -0,0 +1,342 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "regexp" + "strconv" + libstrings "strings" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + defaultFSType = "ext4" + defaultStorageAccountType = storage.StandardLRS +) + +type dataDisk struct { + volume.MetricsProvider + volumeName string + diskName string + podUID types.UID +} + +var ( + supportedCachingModes = sets.NewString( + string(api.AzureDataDiskCachingNone), + string(api.AzureDataDiskCachingReadOnly), + string(api.AzureDataDiskCachingReadWrite)) + + supportedDiskKinds = sets.NewString( + string(api.AzureSharedBlobDisk), + string(api.AzureDedicatedBlobDisk), + string(api.AzureManagedDisk)) + + supportedStorageAccountTypes = sets.NewString("Premium_LRS", 
"Standard_LRS") +) + +func getPath(uid types.UID, volName string, host volume.VolumeHost) string { + return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName) +} + +// creates a unique path for disks (even if they share the same *.vhd name) +func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) { + diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps. + uniqueDiskNameTemplate := "%s%s" + hashedDiskUri := azure.MakeCRC32(diskUri) + prefix := "b" + if isManaged { + prefix = "m" + } + // "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }" + diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri) + pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName) + + return pdPath, nil +} + +func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk { + var metricProvider volume.MetricsProvider + if podUID != "" { + metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host)) + } + + return &dataDisk{ + MetricsProvider: metricProvider, + volumeName: volumeName, + diskName: diskName, + podUID: podUID, + } +} + +func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { + if spec.Volume != nil && spec.Volume.AzureDisk != nil { + return spec.Volume.AzureDisk, nil + } + + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { + return spec.PersistentVolume.Spec.AzureDisk, nil + } + + return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type") +} + +func normalizeFsType(fsType string) string { + if fsType == "" { + return defaultFSType + } + + return fsType +} + +func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { + if kind == "" { + return v1.AzureDedicatedBlobDisk, nil + } + + if !supportedDiskKinds.Has(kind) { + return "", 
fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List()) + } + + return v1.AzureDataDiskKind(kind), nil +} + +func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) { + if storageAccountType == "" { + return defaultStorageAccountType, nil + } + + if !supportedStorageAccountTypes.Has(storageAccountType) { + return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List()) + } + + return storage.SkuName(storageAccountType), nil +} + +func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) { + if cachingMode == "" { + return v1.AzureDataDiskCachingReadWrite, nil + } + + if !supportedCachingModes.Has(string(cachingMode)) { + return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List()) + } + + return cachingMode, nil +} + +type ioHandler interface { + ReadDir(dirname string) ([]os.FileInfo, error) + WriteFile(filename string, data []byte, perm os.FileMode) error + Readlink(name string) (string, error) +} + +//TODO: check if priming the iscsi interface is actually needed + +type osIOHandler struct{} + +func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { + return ioutil.ReadDir(dirname) +} + +func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { + return ioutil.WriteFile(filename, data, perm) +} + +func (handler *osIOHandler) Readlink(name string) (string, error) { + return os.Readlink(name) +} + +// exclude those used by azure as resource and OS root in /dev/disk/azure +func listAzureDiskPath(io ioHandler) []string { + azureDiskPath := "/dev/disk/azure/" + var azureDiskList []string + if dirs, err := io.ReadDir(azureDiskPath); err == nil { + for _, f := range dirs { + name := f.Name() + diskPath := 
azureDiskPath + name + if link, linkErr := io.Readlink(diskPath); linkErr == nil { + sd := link[(libstrings.LastIndex(link, "/") + 1):] + azureDiskList = append(azureDiskList, sd) + } + } + } + glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) + return azureDiskList +} + +func scsiHostRescan(io ioHandler) { + scsi_path := "/sys/class/scsi_host/" + if dirs, err := io.ReadDir(scsi_path); err == nil { + for _, f := range dirs { + name := scsi_path + f.Name() + "/scan" + data := []byte("- - -") + if err = io.WriteFile(name, data, 0666); err != nil { + glog.Warningf("failed to rescan scsi host %s", name) + } + } + } else { + glog.Warningf("failed to read %s, err %v", scsi_path, err) + } +} + +func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { + azureDisks := listAzureDiskPath(io) + return findDiskByLunWithConstraint(lun, io, exe, azureDisks) +} + +// finds a device mounted to "current" node +func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { + var err error + sys_path := "/sys/bus/scsi/devices" + if dirs, err := io.ReadDir(sys_path); err == nil { + for _, f := range dirs { + name := f.Name() + // look for path like /sys/bus/scsi/devices/3:0:0:1 + arr := libstrings.Split(name, ":") + if len(arr) < 4 { + continue + } + // extract LUN from the path. + // LUN is the last index of the array, i.e. 
1 in /sys/bus/scsi/devices/3:0:0:1 + l, err := strconv.Atoi(arr[3]) + if err != nil { + // unknown path format, continue to read the next one + glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) + continue + } + if lun == l { + // find the matching LUN + // read vendor and model to ensure it is a VHD disk + vendor := path.Join(sys_path, name, "vendor") + model := path.Join(sys_path, name, "model") + out, err := exe.Command("cat", vendor, model).CombinedOutput() + if err != nil { + glog.V(4).Infof("azure disk - failed to cat device vendor and model, err: %v", err) + continue + } + matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", libstrings.ToUpper(string(out))) + if err != nil || !matched { + glog.V(4).Infof("azure disk - doesn't match VHD, output %v, error %v", string(out), err) + continue + } + // find a disk, validate name + dir := path.Join(sys_path, name, "block") + if dev, err := io.ReadDir(dir); err == nil { + found := false + for _, diskName := range azureDisks { + glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName) + if string(dev[0].Name()) == diskName { + found = true + break + } + } + if !found { + return "/dev/" + dev[0].Name(), nil + } + } + } + } + } + return "", err +} + +func formatIfNotFormatted(disk string, fstype string) { + notFormatted, err := diskLooksUnformatted(disk) + if err == nil && notFormatted { + args := []string{disk} + // Disk is unformatted so format it. + // Use 'ext4' as the default + if len(fstype) == 0 { + fstype = "ext4" + } + if fstype == "ext4" || fstype == "ext3" { + args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk} + } + glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args) + runner := exec.New() + cmd := runner.Command("mkfs."+fstype, args...) 
+ _, err := cmd.CombinedOutput() + if err == nil { + // the disk has been formatted successfully try to mount it again. + glog.Infof("azureDisk - Disk successfully formatted (mkfs): %s - %s %s", fstype, disk, "tt") + } + glog.Warningf("azureDisk - format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", disk, fstype, "tt", "o", err) + } else { + if err != nil { + glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err) + } else { + glog.Infof("azureDisk - Disk %s already formatted, will not format", disk) + } + } +} + +func diskLooksUnformatted(disk string) (bool, error) { + args := []string{"-nd", "-o", "FSTYPE", disk} + runner := exec.New() + cmd := runner.Command("lsblk", args...) + glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) + dataOut, err := cmd.CombinedOutput() + if err != nil { + glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) + return false, err + } + output := libstrings.TrimSpace(string(dataOut)) + return output == "", nil +} + +func getDiskController(host volume.VolumeHost) (DiskController, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func getCloud(host volume.VolumeHost) (*azure.Cloud, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. 
GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func strFirstLetterToUpper(str string) string { + if len(str) < 2 { + return str + } + return libstrings.ToUpper(string(str[0])) + str[1:] +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util_test.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util_test.go rename to vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go index 93c767217780..b0f4988a9e41 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_common_test.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go index edffe4fd754c..343439f19d7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd.go @@ -17,67 +17,63 @@ limitations under the License. package azure_dd import ( - "fmt" - "os" - "path" - "github.com/Azure/azure-sdk-for-go/arm/compute" - + storage "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - "k8s.io/kubernetes/pkg/util/exec" - "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. 
-func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&azureDataDiskPlugin{}} -} +// interface exposed by the cloud provider implementing Disk functionlity +type DiskController interface { + CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) + DeleteBlobDisk(diskUri string, wasForced bool) error -type azureDataDiskPlugin struct { - host volume.VolumeHost - volumeLocks keymutex.KeyMutex -} + CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) + DeleteManagedDisk(diskURI string) error -// Abstract interface to disk operations. -// azure cloud provider should implement it -type azureCloudProvider interface { // Attaches the disk to the host machine. - AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error + // Check if a list of volumes are attached to the node with the specified NodeName DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) + // Get the LUN number of the disk that is attached to the host GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error) // Get the next available LUN number to attach a new VHD GetNextDiskLun(nodeName types.NodeName) (int32, error) - // InstanceID returns the cloud provider ID of the specified instance. 
- InstanceID(nodeName types.NodeName) (string, error) + // Create a VHD blob - CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) + CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) // Delete a VHD blob - DeleteVolume(name, uri string) error + DeleteVolume(diskURI string) error +} + +type azureDataDiskPlugin struct { + host volume.VolumeHost } var _ volume.VolumePlugin = &azureDataDiskPlugin{} var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{} +var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} const ( azureDataDiskPluginName = "kubernetes.io/azure-disk" ) +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&azureDataDiskPlugin{}} +} + func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error { plugin.host = host - plugin.volumeLocks = keymutex.NewKeyMutex() return nil } @@ -91,7 +87,7 @@ func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, err return "", err } - return volumeSource.DiskName, nil + return volumeSource.DataDiskURI, nil } func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool { @@ -117,281 +113,104 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM } } -func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) { - // azures used directly in a pod have a ReadOnly flag set by the pod author. 
- // azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV - azure, err := getVolumeSource(spec) +// NewAttacher initializes an Attacher +func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { + azure, err := getCloud(plugin.host) if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewAttacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } - fsType := "ext4" - if azure.FSType != nil { - fsType = *azure.FSType - } - cachingMode := v1.AzureDataDiskCachingNone - if azure.CachingMode != nil { - cachingMode = *azure.CachingMode - } - readOnly := false - if azure.ReadOnly != nil { - readOnly = *azure.ReadOnly - } - diskName := azure.DiskName - diskUri := azure.DataDiskURI - return &azureDiskMounter{ - azureDisk: &azureDisk{ - podUID: podUID, - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - cachingMode: cachingMode, - mounter: mounter, - plugin: plugin, - }, - fsType: fsType, - readOnly: readOnly, - diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil -} -func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) { - return &azureDiskUnmounter{ - &azureDisk{ - podUID: podUID, - volName: volName, - mounter: mounter, - plugin: plugin, - }, + return &azureDiskAttacher{ + plugin: plugin, + cloud: azure, }, nil } -func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) { - mounter := plugin.host.GetMounter() - pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) - sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) +func (plugin 
*azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { + azure, err := getCloud(plugin.host) if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } - azVolume := &v1.Volume{ - Name: volName, - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: sourceName, - }, - }, - } - return volume.NewSpecFromVolume(azVolume), nil -} - -func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { - mounter := plugin.host.GetMounter() - return mount.GetMountRefs(mounter, deviceMountPath) -} - -type azureDisk struct { - volName string - podUID types.UID - diskName string - diskUri string - cachingMode v1.AzureDataDiskCachingMode - mounter mount.Interface - plugin *azureDataDiskPlugin - volume.MetricsNil -} -type azureDiskMounter struct { - *azureDisk - // Filesystem type, optional. - fsType string - // Specifies whether the disk will be attached as read-only. - readOnly bool - // diskMounter provides the interface that is used to mount the actual block device. - diskMounter *mount.SafeFormatAndMount + return &azureDiskDetacher{ + plugin: plugin, + cloud: azure, + }, nil } -var _ volume.Mounter = &azureDiskMounter{} - -func (b *azureDiskMounter) GetAttributes() volume.Attributes { - return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: !b.readOnly, - SupportsSELinux: true, +func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { + volumeSource, err := getVolumeSource(spec) + if err != nil { + return nil, err } -} -// Checks prior to mount operations to verify that the required components (binaries, etc.) -// to mount the volume are available on the underlying node. 
-// If not, it returns an error -func (b *azureDiskMounter) CanMount() error { - return nil -} + disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host) -// SetUp attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUp(fsGroup *int64) error { - return b.SetUpAt(b.GetPath(), fsGroup) + return &azureDiskDeleter{ + spec: spec, + plugin: plugin, + dataDisk: disk, + }, nil } -// SetUpAt attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { - b.plugin.volumeLocks.LockKey(b.diskName) - defer b.plugin.volumeLocks.UnlockKey(b.diskName) - - // TODO: handle failed mounts here. - notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err) - if err != nil && !os.IsNotExist(err) { - glog.Errorf("IsLikelyNotMountPoint failed: %v", err) - return err +func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + if len(options.PVC.Spec.AccessModes) == 0 { + options.PVC.Spec.AccessModes = plugin.GetAccessModes() } - if !notMnt { - glog.V(4).Infof("%s is a mount point", dir) - return nil - } - - globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName) - if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) - return err - } + return &azureDiskProvisioner{ + plugin: plugin, + options: options, + }, nil +} - // Perform a bind mount to the full path to allow duplicate mounts of the same PD. 
- options := []string{"bind"} - if b.readOnly { - options = append(options, "ro") - } - err = b.mounter.Mount(globalPDPath, dir, "", options) +func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) { + volumeSource, err := getVolumeSource(spec) if err != nil { - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) - return err - } - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) - return err - } - } - os.Remove(dir) - return err - } - - if !b.readOnly { - volume.SetVolumeOwnership(b, fsGroup) + return nil, err } - glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir) - return nil -} - -func makeGlobalPDPath(host volume.VolumeHost, volume string) string { - return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume) -} - -func (azure *azureDisk) GetPath() string { - name := azureDataDiskPluginName - return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName) -} + disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host) -type azureDiskUnmounter struct { - *azureDisk + return &azureDiskMounter{ + plugin: plugin, + spec: spec, + options: options, + dataDisk: disk, + }, nil } -var _ volume.Unmounter = &azureDiskUnmounter{} +func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { + disk := 
makeDataDisk(volName, podUID, "", plugin.host) -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. -func (c *azureDiskUnmounter) TearDown() error { - return c.TearDownAt(c.GetPath()) + return &azureDiskUnmounter{ + plugin: plugin, + dataDisk: disk, + }, nil } -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. -func (c *azureDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := util.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } +func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + mounter := plugin.host.GetMounter() + pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) + sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) - notMnt, err := c.mounter.IsLikelyNotMountPoint(dir) - if err != nil { - glog.Errorf("Error checking if mountpoint %s: %v", dir, err) - return err - } - if notMnt { - glog.V(2).Info("Not mountpoint, deleting") - return os.Remove(dir) - } - // lock the volume (and thus wait for any concurrrent SetUpAt to finish) - c.plugin.volumeLocks.LockKey(c.diskName) - defer c.plugin.volumeLocks.UnlockKey(c.diskName) - refs, err := mount.GetMountRefs(c.mounter, dir) if err != nil { - glog.Errorf("Error getting mountrefs for %s: %v", dir, err) - return err - } - if len(refs) == 0 { - glog.Errorf("Did not find pod-mount for %s during tear down", dir) - return fmt.Errorf("%s is not mounted", dir) - } - c.diskName = path.Base(refs[0]) - glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir) - - // Unmount the bind-mount inside this pod - if err := c.mounter.Unmount(dir); err != nil { - glog.Errorf("Error unmounting 
dir %s %v", dir, err) - return err - } - notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if notMnt { - if err := os.Remove(dir); err != nil { - glog.Errorf("Error removing mountpoint %s %v", dir, err) - return err - } + return nil, err } - return nil -} -func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { - if spec.Volume != nil && spec.Volume.AzureDisk != nil { - return spec.Volume.AzureDisk, nil - } - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { - return spec.PersistentVolume.Spec.AzureDisk, nil + azureVolume := &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DataDiskURI: sourceName, + }, + }, } - - return nil, fmt.Errorf("Spec does not reference an Azure disk volume type") + return volume.NewSpecFromVolume(azureVolume), nil } -// Return cloud provider -func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) { - azureCloudProvider, ok := cloudProvider.(*azure.Cloud) - if !ok || azureCloudProvider == nil { - return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) - } - - return azureCloudProvider, nil +func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { + m := plugin.host.GetMounter() + return mount.GetMountRefs(m, deviceMountPath) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go index db885c40f35e..e3454d524f24 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_test.go @@ -17,17 +17,11 @@ limitations under the License. 
package azure_dd import ( - "fmt" "os" - "path" "testing" - "github.com/Azure/azure-sdk-for-go/arm/compute" - - "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" ) @@ -57,121 +51,5 @@ func TestCanSupport(t *testing.T) { } } -const ( - fakeDiskName = "foo" - fakeDiskUri = "https://azure/vhds/bar.vhd" - fakeLun = 2 -) - -type fakeAzureProvider struct { -} - -func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun { - return fmt.Errorf("wrong disk") - } - return nil - -} - -func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri { - return fmt.Errorf("wrong disk") - } - return nil -} -func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) { - return int32(fakeLun), nil -} - -func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) { - return fakeLun, nil -} -func (fake *fakeAzureProvider) InstanceID(name string) (string, error) { - return "localhost", nil -} - -func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - return "", "", 0, fmt.Errorf("not implemented") -} - -func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error { - return fmt.Errorf("not implemented") -} - -func TestPlugin(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("azure_ddTest") - if err != nil { - t.Fatalf("can't make a temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - plug, err := 
plugMgr.FindPluginByName(azureDataDiskPluginName) - if err != nil { - t.Errorf("Can't find the plugin by name") - } - fs := "ext4" - ro := false - caching := v1.AzureDataDiskCachingNone - spec := &v1.Volume{ - Name: "vol1", - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: fakeDiskName, - DataDiskURI: fakeDiskUri, - FSType: &fs, - CachingMode: &caching, - ReadOnly: &ro, - }, - }, - } - mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Mounter: %v", err) - } - if mounter == nil { - t.Errorf("Got a nil Mounter") - } - volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1") - path := mounter.GetPath() - if path != volPath { - t.Errorf("Got unexpected path: %s, should be %s", path, volPath) - } - - if err := mounter.SetUp(nil); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - - unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Unmounter: %v", err) - } - if unmounter == nil { - t.Errorf("Got a nil Unmounter") - } - - if err := unmounter.TearDown(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err == nil { - t.Errorf("TearDown() failed, volume path still exists: %s", path) - } else if !os.IsNotExist(err) { - t.Errorf("SetUp() failed: %v", err) - } -} +// fakeAzureProvider type was removed because all functions were not used 
+// Testing mounting will require path calculation which depends on the cloud provider, which is faked in the above test. diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go new file mode 100644 index 000000000000..21b98becb774 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_mounter.go @@ -0,0 +1,184 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +type azureDiskMounter struct { + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} + +type azureDiskUnmounter struct { + *dataDisk + plugin *azureDataDiskPlugin +} + +var _ volume.Unmounter = &azureDiskUnmounter{} +var _ volume.Mounter = &azureDiskMounter{} + +func (m *azureDiskMounter) GetAttributes() volume.Attributes { + volumeSource, _ := getVolumeSource(m.spec) + return volume.Attributes{ + ReadOnly: *volumeSource.ReadOnly, + Managed: !*volumeSource.ReadOnly, + SupportsSELinux: true, + } +} + +func (m *azureDiskMounter) CanMount() error { + return nil +} + +func (m *azureDiskMounter) SetUp(fsGroup *int64) error { + return m.SetUpAt(m.GetPath(), fsGroup) +} + +func (m *azureDiskMounter) GetPath() string { + return getPath(m.dataDisk.podUID, 
m.dataDisk.volumeName, m.plugin.host) +} + +func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { + mounter := m.plugin.host.GetMounter() + volumeSource, err := getVolumeSource(m.spec) + + if err != nil { + glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) + return err + } + + diskName := volumeSource.DiskName + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + + if err != nil && !os.IsNotExist(err) { + glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) + return err + } + if !mountPoint { + return fmt.Errorf("azureDisk - Not a mounting point for disk %s on %s", diskName, dir) + } + + if err := os.MkdirAll(dir, 0750); err != nil { + glog.Infof("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) + return err + } + + options := []string{"bind"} + + if *volumeSource.ReadOnly { + options = append(options, "ro") + } + + glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) + + if err != nil { + return err + } + + mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options) + // Everything in the following control flow is meant as an + // attempt cleanup a failed setupAt (bind mount) + if mountErr != nil { + glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr) + } + + if !mountPoint { + if err = mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to 
unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + if !mountPoint { + // not cool. leave for next sync loop. + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr) + } + } + + if err = os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr) + } + + glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, err, mountErr) + return mountErr + } + + if !*volumeSource.ReadOnly { + volume.SetVolumeOwnership(m, fsGroup) + } + + glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) + return nil +} + +func (u *azureDiskUnmounter) TearDown() error { + return u.TearDownAt(u.GetPath()) +} + +func (u *azureDiskUnmounter) TearDownAt(dir string) error { + if pathExists, pathErr := util.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + + glog.V(4).Infof("azureDisk - TearDownAt: %s", dir) + mounter := u.plugin.host.GetMounter() + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err) + } + if mountPoint { + if err := os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do 
os.Remove %s", dir, err) + } + } + if err := mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err) + } + mountPoint, err = mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearTownAt:IsLikelyNotMountPoint check failed: %v", err) + } + + if mountPoint { + return os.Remove(dir) + } + + return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir") +} + +func (u *azureDiskUnmounter) GetPath() string { + return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go index 0b63b5b73da6..6770102b9b80 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_provision.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,147 +20,182 @@ import ( "fmt" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/api/v1" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) -var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} -var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +type azureDiskProvisioner struct { + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} type azureDiskDeleter struct { - *azureDisk - azureProvider azureCloudProvider + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin } -func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } +var _ volume.Provisioner = &azureDiskProvisioner{} +var _ volume.Deleter = &azureDiskDeleter{} - return plugin.newDeleterInternal(spec, azure) +func (d *azureDiskDeleter) GetPath() string { + return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host) } -func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) { - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil { - return nil, fmt.Errorf("invalid PV spec") +func (d *azureDiskDeleter) Delete() error { + volumeSource, err := getVolumeSource(d.spec) + if err != nil { + return err } - diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName - diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI - return &azureDiskDeleter{ - azureDisk: &azureDisk{ - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - plugin: plugin, - }, - azureProvider: azure, - }, nil -} -func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - azure, 
err := getAzureCloudProvider(plugin.host.GetCloudProvider()) + diskController, err := getDiskController(d.plugin.host) if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - if len(options.PVC.Spec.AccessModes) == 0 { - options.PVC.Spec.AccessModes = plugin.GetAccessModes() + return err } - return plugin.newProvisionerInternal(options, azure) -} -func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) { - return &azureDiskProvisioner{ - azureDisk: &azureDisk{ - plugin: plugin, - }, - azureProvider: azure, - options: options, - }, nil -} + wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk) + managed := (*volumeSource.Kind == v1.AzureManagedDisk) -var _ volume.Deleter = &azureDiskDeleter{} - -func (d *azureDiskDeleter) GetPath() string { - name := azureDataDiskPluginName - return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName) -} + if managed { + return diskController.DeleteManagedDisk(volumeSource.DataDiskURI) + } -func (d *azureDiskDeleter) Delete() error { - glog.V(4).Infof("deleting volume %s", d.diskUri) - return d.azureProvider.DeleteVolume(d.diskName, d.diskUri) + return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone) } -type azureDiskProvisioner struct { - *azureDisk - azureProvider azureCloudProvider - options volume.VolumeOptions -} +func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { + if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) + } + supportedModes := p.plugin.GetAccessModes() -var _ volume.Provisioner = &azureDiskProvisioner{} + // perform static validation first + if p.options.PVC.Spec.Selector != nil { + return nil, 
fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") + } -func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) { - return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes()) + if len(p.options.PVC.Spec.AccessModes) > 1 { + return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin") } - var sku, location, account string + if len(p.options.PVC.Spec.AccessModes) == 1 { + if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] { + return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes) + } + } + var ( + location, account string + storageAccountType, fsType string + cachingMode v1.AzureDataDiskCachingMode + strKind string + err error + ) // maxLength = 79 - (4 for ".vhd") = 75 - name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75) - capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) + capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) - // Apply ProvisionerParameters (case-insensitive). We leave validation of - // the values to the cloud provider. 
- for k, v := range a.options.Parameters { + for k, v := range p.options.Parameters { switch strings.ToLower(k) { case "skuname": - sku = v + storageAccountType = v case "location": location = v case "storageaccount": account = v + case "storageaccounttype": + storageAccountType = v + case "kind": + strKind = v + case "cachingmode": + cachingMode = v1.AzureDataDiskCachingMode(v) + case "fstype": + fsType = strings.ToLower(v) default: - return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName()) + return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } } - // TODO: implement c.options.ProvisionerSelector parsing - if a.options.PVC.Spec.Selector != nil { - return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") + + // normalize values + fsType = normalizeFsType(fsType) + skuName, err := normalizeStorageAccountType(storageAccountType) + if err != nil { + return nil, err + } + + kind, err := normalizeKind(strFirstLetterToUpper(strKind)) + if err != nil { + return nil, err } - diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB) + if cachingMode, err = normalizeCachingMode(cachingMode); err != nil { + return nil, err + } + + diskController, err := getDiskController(p.plugin.host) if err != nil { return nil, err } + // create disk + diskURI := "" + if kind == v1.AzureManagedDisk { + diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags)) + if err != nil { + return nil, err + } + } else { + forceStandAlone := (kind == v1.AzureDedicatedBlobDisk) + if kind == v1.AzureDedicatedBlobDisk { + if location != "" && account != "" { + // use dedicated kind (by default) for compatibility + _, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, requestGB) + if err != nil { + return nil, err + } + } else { + if location != "" || account != "" { + return nil, 
fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or specified for dedicated kind, only one value specified is not allowed", + location, account) + } + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } else { + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: a.options.PVName, + Name: p.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner", + "volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy, - AccessModes: a.options.PVC.Spec.AccessModes, + PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy, + AccessModes: supportedModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: diskName, - DataDiskURI: diskUri, + CachingMode: &cachingMode, + DiskName: name, + DataDiskURI: diskURI, + Kind: &kind, + FSType: &fsType, }, }, }, diff --git a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go b/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go deleted file mode 100644 index 8db5093b76f5..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/azure_dd/vhd_util.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure_dd - -import ( - "io/ioutil" - "os" - "path" - "regexp" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/exec" -) - -type ioHandler interface { - ReadDir(dirname string) ([]os.FileInfo, error) - WriteFile(filename string, data []byte, perm os.FileMode) error - Readlink(name string) (string, error) -} - -type osIOHandler struct{} - -func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { - return ioutil.ReadDir(dirname) -} -func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { - return ioutil.WriteFile(filename, data, perm) -} -func (handler *osIOHandler) Readlink(name string) (string, error) { - return os.Readlink(name) -} - -// exclude those used by azure as resource and OS root in /dev/disk/azure -func listAzureDiskPath(io ioHandler) []string { - azureDiskPath := "/dev/disk/azure/" - var azureDiskList []string - if dirs, err := io.ReadDir(azureDiskPath); err == nil { - for _, f := range dirs { - name := f.Name() - diskPath := azureDiskPath + name - if link, linkErr := io.Readlink(diskPath); linkErr == nil { - sd := link[(strings.LastIndex(link, "/") + 1):] - azureDiskList = append(azureDiskList, sd) - } - } - } - glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) - return azureDiskList -} - -// given a LUN find the VHD device path like /dev/sdd -// exclude those disks used by Azure resources and OS root -func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { - azureDisks := listAzureDiskPath(io) - return 
findDiskByLunWithConstraint(lun, io, exe, azureDisks) -} - -// look for device /dev/sdX and validate it is a VHD -// return empty string if no disk is found -func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { - var err error - sys_path := "/sys/bus/scsi/devices" - if dirs, err := io.ReadDir(sys_path); err == nil { - for _, f := range dirs { - name := f.Name() - // look for path like /sys/bus/scsi/devices/3:0:0:1 - arr := strings.Split(name, ":") - if len(arr) < 4 { - continue - } - // extract LUN from the path. - // LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1 - l, err := strconv.Atoi(arr[3]) - if err != nil { - // unknown path format, continue to read the next one - glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err) - continue - } - if lun == l { - // find the matching LUN - // read vendor and model to ensure it is a VHD disk - vendor := path.Join(sys_path, name, "vendor") - model := path.Join(sys_path, name, "model") - out, err := exe.Command("cat", vendor, model).CombinedOutput() - if err != nil { - glog.Errorf("failed to cat device vendor and model, err: %v", err) - continue - } - matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out))) - if err != nil || !matched { - glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err) - continue - } - // find a disk, validate name - dir := path.Join(sys_path, name, "block") - if dev, err := io.ReadDir(dir); err == nil { - found := false - for _, diskName := range azureDisks { - glog.V(12).Infof("validating disk %q with sys disk %q", dev[0].Name(), diskName) - if string(dev[0].Name()) == diskName { - found = true - break - } - } - if !found { - return "/dev/" + dev[0].Name(), nil - } - } - } - } - } - return "", err -} - -// rescan scsi bus -func scsiHostRescan(io ioHandler) { - scsi_path := "/sys/class/scsi_host/" - if dirs, err := 
io.ReadDir(scsi_path); err == nil { - for _, f := range dirs { - name := scsi_path + f.Name() + "/scan" - data := []byte("- - -") - if err = io.WriteFile(name, data, 0666); err != nil { - glog.Errorf("failed to rescan scsi host %s", name) - } - } - } else { - glog.Errorf("failed to read %s, err %v", scsi_path, err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index 77ea372daea4..ab5774554e36 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -67,3 +67,47 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou return nil } + +// utility to tear down a disk based filesystem +func diskTearDown(manager diskManager, c fcDiskUnmounter, volPath string, mounter mount.Interface) error { + noMnt, err := mounter.IsLikelyNotMountPoint(volPath) + if err != nil { + glog.Errorf("cannot validate mountpoint %s", volPath) + return err + } + if noMnt { + return os.Remove(volPath) + } + + refs, err := mount.GetMountRefs(mounter, volPath) + if err != nil { + glog.Errorf("failed to get reference count %s", volPath) + return err + } + if err := mounter.Unmount(volPath); err != nil { + glog.Errorf("failed to unmount %s", volPath) + return err + } + // If len(refs) is 1, then all bind mounts have been removed, and the + // remaining reference is the global mount. It is safe to detach. 
+ if len(refs) == 1 { + mntPath := refs[0] + if err := manager.DetachDisk(c, mntPath); err != nil { + glog.Errorf("failed to detach disk from %s", mntPath) + return err + } + } + + noMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) + if mntErr != nil { + glog.Errorf("isMountpoint check failed: %v", mntErr) + return err + } + if noMnt { + if err := os.Remove(volPath); err != nil { + return err + } + } + return nil + +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index 9d1befd93eff..2d98773a99ea 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -63,7 +63,7 @@ func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) { } // TargetWWNs are the FibreChannel target worldwide names - return fmt.Sprintf("%v:%v", volumeSource.TargetWWNs, *volumeSource.Lun), nil + return fmt.Sprintf("%v", volumeSource.TargetWWNs), nil } func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool { @@ -231,7 +231,13 @@ func (c *fcDiskUnmounter) TearDown() error { } func (c *fcDiskUnmounter) TearDownAt(dir string) error { - return util.UnmountPath(dir, c.mounter) + if pathExists, pathErr := util.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + return diskTearDown(c.manager, *c, dir, c.mounter) } func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index 162431933c4c..feded0c2723c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -167,16 +167,16 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { } // mount it globalPDPath := util.MakeGlobalPDName(*b.fcDisk) 
- if err := os.MkdirAll(globalPDPath, 0750); err != nil { - return devicePath, fmt.Errorf("fc: failed to mkdir %s, error", globalPDPath) - } - noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) if !noMnt { glog.Infof("fc: %s already mounted", globalPDPath) return devicePath, nil } + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + return devicePath, fmt.Errorf("fc: failed to mkdir %s, error", globalPDPath) + } + err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) if err != nil { return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go index 4e2aa638ed11..d5cd6a802e8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go @@ -349,7 +349,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { const invalidOption = "Invalid option auto_unmount" if dstrings.Contains(errs.Error(), invalidOption) { - // Give a try without `auto_unmount mount option, because + // Give a try without `auto_unmount` mount option, because // it could be that gluster fuse client is older version and // mount.glusterfs is unaware of `auto_unmount`. 
noAutoMountOptions := make([]string, 0, len(mountOptions)) @@ -358,7 +358,6 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { noAutoMountOptions = append(noAutoMountOptions, opt) } } - errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions) if errs == nil { glog.Infof("glusterfs: successfully mounted %s", dir) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go index 4b4525e79e45..7ec17614a65f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go @@ -148,11 +148,6 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) } - glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) - err = os.RemoveAll(deviceMountPath) - if err != nil { - return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", deviceMountPath, err) - } glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index 458d972a0d1e..697efa241d85 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -90,3 +90,36 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter return nil } + +// utility to tear down a disk based filesystem +func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mounter mount.Interface) error { + notMnt, err := mounter.IsLikelyNotMountPoint(volPath) + if err != nil { + glog.Errorf("cannot validate mountpoint %s", volPath) + return err + } + if notMnt { + return os.Remove(volPath) + } + _, err = mount.GetMountRefs(mounter, volPath) + if err != nil { + 
glog.Errorf("failed to get reference count %s", volPath) + return err + } + if err := mounter.Unmount(volPath); err != nil { + glog.Errorf("failed to unmount %s", volPath) + return err + } + notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) + if mntErr != nil { + glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + return err + } + if notMnt { + if err := os.Remove(volPath); err != nil { + return err + } + } + return nil + +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index ad5d5f74b087..d1d450507638 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -263,7 +263,13 @@ func (c *iscsiDiskUnmounter) TearDown() error { } func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { - return ioutil.UnmountPath(dir, c.mounter) + if pathExists, pathErr := ioutil.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + return diskTearDown(c.manager, *c, dir, c.mounter) } func portalMounter(portal string) string { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go index dd11ad2f1028..75fad95bb203 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go @@ -272,9 +272,6 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) } - if lastErr != nil { - glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) - } //Make sure we use a valid devicepath to find mpio device. 
devicePath = devicePaths[0] diff --git a/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index bf2313622789..9e4f91fd7532 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -198,7 +198,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("invalid path: %s %v", m.globalPath, err) } - notMnt, err := m.mounter.IsLikelyNotMountPoint(dir) + notMnt, err := m.mounter.IsNotMountPoint(dir) glog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) if err != nil && !os.IsNotExist(err) { glog.Errorf("cannot validate mount point: %s %v", dir, err) @@ -223,9 +223,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { err = m.mounter.Mount(m.globalPath, dir, "", options) if err != nil { glog.Errorf("Mount of volume %s failed: %v", dir, err) - notMnt, mntErr := m.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr := m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { @@ -233,9 +233,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Failed to unmount: %v", mntErr) return err } - notMnt, mntErr = m.mounter.IsLikelyNotMountPoint(dir) + notMnt, mntErr = m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + glog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { @@ -269,5 +269,5 @@ func (u *localVolumeUnmounter) TearDown() error { // TearDownAt unmounts the bind mount func (u *localVolumeUnmounter) TearDownAt(dir string) error { glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) - return util.UnmountPath(dir, u.mounter) 
+ return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */ } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index 291baa06e08e..82f31d3af58d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -266,7 +266,7 @@ func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("Portworx Volume set up: %s %v %v", dir, !notMnt, err) + glog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { glog.Errorf("Cannot validate mountpoint: %s", dir) return err @@ -291,7 +291,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Portworx Volume %s mounted to %s", b.volumeID, dir) + glog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil } @@ -314,8 +314,8 @@ func (c *portworxVolumeUnmounter) TearDown() error { // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. func (c *portworxVolumeUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("Portworx Volume TearDown of %s", dir) - // Call Portworx Unmount for Portworx's book-keeping. 
+ glog.Infof("Portworx Volume TearDown of %s", dir) + if err := c.manager.UnmountVolume(c, dir); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go index 54f9c6137623..0a727df98184 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx_util.go @@ -43,12 +43,14 @@ type PortworxVolumeUtil struct { // CreateVolume creates a Portworx volume. func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int, map[string]string, error) { - driver, err := util.getPortworxDriver(p.plugin.host) + driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return "", 0, nil, err } + glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) + capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Portworx Volumes are specified in GB requestGB := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) @@ -56,6 +58,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri specHandler := osdspec.NewSpecHandler() spec, err := specHandler.SpecFromOpts(p.options.Parameters) if err != nil { + glog.Errorf("Error parsing parameters for PVC: %v. 
Err: %v", p.options.PVC.Name, err) return "", 0, nil, err } spec.Size = uint64(requestGB * 1024 * 1024 * 1024) @@ -68,14 +71,16 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri locator.VolumeLabels[pvcClaimLabel] = p.options.PVC.Name volumeID, err := driver.Create(&locator, &source, spec) if err != nil { - glog.V(2).Infof("Error creating Portworx Volume : %v", err) + glog.Errorf("Error creating Portworx Volume : %v", err) } + + glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) return volumeID, requestGB, nil, err } // DeleteVolume deletes a Portworx volume func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { - driver, err := util.getPortworxDriver(d.plugin.host) + driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -83,7 +88,7 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { err = driver.Delete(d.volumeID) if err != nil { - glog.V(2).Infof("Error deleting Portworx Volume (%v): %v", d.volName, err) + glog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) return err } return nil @@ -91,7 +96,7 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { // AttachVolume attaches a Portworx Volume func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, error) { - driver, err := util.getPortworxDriver(m.plugin.host) + driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. 
Err: %v", err) return "", err @@ -99,7 +104,7 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, devicePath, err := driver.Attach(m.volName) if err != nil { - glog.V(2).Infof("Error attaching Portworx Volume (%v): %v", m.volName, err) + glog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) return "", err } return devicePath, nil @@ -107,7 +112,7 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, // DetachVolume detaches a Portworx Volume func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { - driver, err := util.getPortworxDriver(u.plugin.host) + driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -115,7 +120,7 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { err = driver.Detach(u.volName) if err != nil { - glog.V(2).Infof("Error detaching Portworx Volume (%v): %v", u.volName, err) + glog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) return err } return nil @@ -123,7 +128,7 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { // MountVolume mounts a Portworx Volume on the specified mountPath func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { - driver, err := util.getPortworxDriver(m.plugin.host) + driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. 
Err: %v", err) return err @@ -131,7 +136,7 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath err = driver.Mount(m.volName, mountPath) if err != nil { - glog.V(2).Infof("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) + glog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) return err } return nil @@ -139,7 +144,7 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath // UnmountVolume unmounts a Portworx Volume func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { - driver, err := util.getPortworxDriver(u.plugin.host) + driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { glog.Errorf("Failed to get portworx driver. Err: %v", err) return err @@ -147,7 +152,7 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP err = driver.Unmount(u.volName, mountPath) if err != nil { - glog.V(2).Infof("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) + glog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) return err } return nil @@ -181,13 +186,34 @@ func createDriverClient(hostname string) (*osdclient.Client, error) { } } -func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) { +// getPortworxDriver() returns a Portworx volume driver which can be used for volume operations +// localOnly: If true, the returned driver will be connected to Portworx API server on volume host. +// If false, driver will be connected to API server on volume host or Portworx k8s service cluster IP +// This flag is required to explicitly force certain operations (mount, unmount, detach, attach) to +// go to the volume host instead of the k8s service which might route it to any host. 
This pertains to how +// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to +// see the pod container mounts (specifically /var/lib/kubelet/pods/) +// Operations like create and delete volume don't need to be restricted to local volume host since +// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to +// the Portworx node that will own/owns the data. +func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, localOnly bool) (volumeapi.VolumeDriver, error) { + var err error + if localOnly { + util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) + if err != nil { + return nil, err + } else { + glog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) + return volumeclient.VolumeDriver(util.portworxClient), nil + } + } + + // check if existing saved client is valid if isValid, _ := isClientValid(util.portworxClient); isValid { return volumeclient.VolumeDriver(util.portworxClient), nil } // create new client - var err error util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) // for backward compatibility if err != nil || util.portworxClient == nil { // Create client from portworx service @@ -215,7 +241,7 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) return nil, err } - glog.Infof("Using portworx service at: %v as api endpoint", svc.Spec.ClusterIP) + glog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) } else { glog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD index 99d375f8de3d..58825b407632 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/BUILD @@ -24,6 +24,7 @@ go_test( 
"//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", "//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", + "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/client-go/util/testing:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go index bfcb6f0d062b..8742905ed09d 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_client.go @@ -45,7 +45,7 @@ type sioInterface interface { FindVolume(name string) (*siotypes.Volume, error) Volume(sioVolumeID) (*siotypes.Volume, error) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, error) - AttachVolume(sioVolumeID) error + AttachVolume(sioVolumeID, bool) error DetachVolume(sioVolumeID) error DeleteVolume(sioVolumeID) error IID() (string, error) @@ -217,8 +217,9 @@ func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, e return c.Volume(sioVolumeID(createResponse.ID)) } -// AttachVolume maps the scaleio volume to an sdc node. -func (c *sioClient) AttachVolume(id sioVolumeID) error { +// AttachVolume maps the scaleio volume to an sdc node. If the multipleMappings flag +// is true, ScaleIO will allow other SDC to map to that volume. 
+func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error { if err := c.init(); err != nil { glog.Error(log("failed to init'd client in attach volume: %v", err)) return err @@ -232,7 +233,7 @@ func (c *sioClient) AttachVolume(id sioVolumeID) error { params := &siotypes.MapVolumeSdcParam{ SdcID: iid, - AllowMultipleMappings: "false", + AllowMultipleMappings: strconv.FormatBool(multipleMappings), AllSdcs: "", } volClient := sio.NewVolume(c.client) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go index ca10677dd718..83d5e498dc87 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr.go @@ -27,7 +27,7 @@ import ( type storageInterface interface { CreateVolume(string, int64) (*siotypes.Volume, error) - AttachVolume(string) (string, error) + AttachVolume(string, bool) (string, error) IsAttached(string) (bool, error) DetachVolume(string) error DeleteVolume(string) error @@ -103,8 +103,9 @@ func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, e return vol, nil } -// AttachVolume maps a ScaleIO volume to the running node -func (m *sioMgr) AttachVolume(volName string) (string, error) { +// AttachVolume maps a ScaleIO volume to the running node. If flag multiMaps, +// ScaleIO will allow other SDC to map to volume. 
+func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, error) { client, err := m.getClient() if err != nil { glog.Error(log("attach volume failed: %v", err)) @@ -139,7 +140,7 @@ func (m *sioMgr) AttachVolume(volName string) (string, error) { } // attach volume, get deviceName - if err := client.AttachVolume(sioVolumeID(vol.ID)); err != nil { + if err := client.AttachVolume(sioVolumeID(vol.ID), multipleMappings); err != nil { glog.Error(log("attachment for volume %s failed :%v", volName, err)) return "", err } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr_test.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr_test.go index c8f4b44927b7..3d580b6b99bd 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_mgr_test.go @@ -99,7 +99,7 @@ func TestMgrCreateVolume(t *testing.T) { func TestMgrAttachVolume(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - device, err := mgr.AttachVolume("test-vol-0001") + device, err := mgr.AttachVolume("test-vol-0001", false) if err != nil { t.Fatal(err) } @@ -111,8 +111,8 @@ func TestMgrAttachVolume(t *testing.T) { func TestMgrAttachVolume_AlreadyAttached(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - mgr.AttachVolume("test-vol-0001") - dev, err := mgr.AttachVolume("test-vol-0001") + mgr.AttachVolume("test-vol-0001", false) + dev, err := mgr.AttachVolume("test-vol-0001", false) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -124,7 +124,8 @@ func TestMgrAttachVolume_AlreadyAttached(t *testing.T) { func TestMgrAttachVolume_VolumeNotFoundError(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - _, err := mgr.AttachVolume("test-vol-0002") + _, err := mgr.AttachVolume("test-vol-0002", false) + if err == nil { t.Error("attachVolume should fail with volume not found error") } @@ -137,7 +138,7 @@ func 
TestMgrAttachVolume_WaitForAttachError(t *testing.T) { c := mgr.client.(*fakeSio) close(c.waitAttachCtrl) }() - _, err := mgr.AttachVolume("test-vol-0001") + _, err := mgr.AttachVolume("test-vol-0001", false) if err == nil { t.Error("attachVolume should fail with attach timeout error") } @@ -146,7 +147,7 @@ func TestMgrAttachVolume_WaitForAttachError(t *testing.T) { func TestMgrDetachVolume(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - mgr.AttachVolume("test-vol-0001") + mgr.AttachVolume("test-vol-0001", false) if err := mgr.DetachVolume("test-vol-0001"); err != nil { t.Fatal(err) } @@ -162,7 +163,7 @@ func TestMgrDetachVolume(t *testing.T) { func TestMgrDetachVolume_VolumeNotFound(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - mgr.AttachVolume("test-vol-0001") + mgr.AttachVolume("test-vol-0001", false) err := mgr.DetachVolume("test-vol-0002") if err == nil { t.Fatal("expected a volume not found failure") @@ -181,7 +182,7 @@ func TestMgrDetachVolume_VolumeNotAttached(t *testing.T) { func TestMgrDetachVolume_VolumeAlreadyDetached(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - mgr.AttachVolume("test-vol-0001") + mgr.AttachVolume("test-vol-0001", false) mgr.DetachVolume("test-vol-0001") err := mgr.DetachVolume("test-vol-0001") if err != nil { @@ -192,7 +193,7 @@ func TestMgrDetachVolume_VolumeAlreadyDetached(t *testing.T) { func TestMgrDetachVolume_WaitForDetachError(t *testing.T) { mgr := newTestMgr(t) mgr.CreateVolume("test-vol-0001", 8*1024*1024) - mgr.AttachVolume("test-vol-0001") + mgr.AttachVolume("test-vol-0001", false) err := mgr.DetachVolume("test-vol-0001") if err != nil { t.Error("detachVolume failed") @@ -227,6 +228,7 @@ type fakeSio struct { waitAttachCtrl chan struct{} waitDetachCtrl chan struct{} devs map[string]string + isMultiMap bool } func newFakeSio() *fakeSio { @@ -261,7 +263,8 @@ func (f *fakeSio) CreateVolume(volName string, 
sizeGB int64) (*siotypes.Volume, return f.volume, nil } -func (f *fakeSio) AttachVolume(id sioVolumeID) error { +func (f *fakeSio) AttachVolume(id sioVolumeID, multiMaps bool) error { + f.isMultiMap = multiMaps _, err := f.Volume(id) if err != nil { return err diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go index ff45c41b0220..04bf70793280 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_plugin.go @@ -149,6 +149,7 @@ var _ volume.PersistentVolumePlugin = &sioPlugin{} func (p *sioPlugin) GetAccessModes() []api.PersistentVolumeAccessMode { return []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, + api.ReadOnlyMany, } } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go index 42cc31c9f4ee..2ee7d918dcc1 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util.go @@ -68,11 +68,13 @@ var ( nsSep = "%" sdcRootPath = "/opt/emc/scaleio/sdc/bin" - secretNotFoundErr = errors.New("secret not found") - configMapNotFoundErr = errors.New("configMap not found") - gatewayNotProvidedErr = errors.New("gateway not provided") - secretRefNotProvidedErr = errors.New("secret ref not provided") - systemNotProvidedErr = errors.New("secret not provided") + secretNotFoundErr = errors.New("secret not found") + configMapNotFoundErr = errors.New("configMap not found") + gatewayNotProvidedErr = errors.New("ScaleIO gateway not provided") + secretRefNotProvidedErr = errors.New("secret ref not provided") + systemNotProvidedErr = errors.New("ScaleIO system not provided") + storagePoolNotProvidedErr = errors.New("ScaleIO storage pool not provided") + protectionDomainNotProvidedErr = errors.New("ScaleIO protection domain not provided") ) // mapScaleIOVolumeSource maps attributes from a 
ScaleIOVolumeSource to config @@ -107,6 +109,12 @@ func validateConfigs(config map[string]string) error { if config[confKey.system] == "" { return systemNotProvidedErr } + if config[confKey.storagePool] == "" { + return storagePoolNotProvidedErr + } + if config[confKey.protectionDomain] == "" { + return protectionDomainNotProvidedErr + } return nil } @@ -119,8 +127,6 @@ func applyConfigDefaults(config map[string]string) { b = false } config[confKey.sslEnabled] = strconv.FormatBool(b) - config[confKey.protectionDomain] = defaultString(config[confKey.protectionDomain], "default") - config[confKey.storagePool] = defaultString(config[confKey.storagePool], "default") config[confKey.storageMode] = defaultString(config[confKey.storageMode], "ThinProvisioned") config[confKey.fsType] = defaultString(config[confKey.fsType], "xfs") b, err = strconv.ParseBool(config[confKey.readOnly]) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util_test.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util_test.go index 2de09752f12b..97e5512f8b9f 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_util_test.go @@ -115,10 +115,10 @@ func TestUtilApplyConfigDefaults(t *testing.T) { if data[confKey.system] != "sio" { t.Error("Unexpected system value") } - if data[confKey.protectionDomain] != "default" { + if data[confKey.protectionDomain] != "" { t.Error("Unexpected protection domain value") } - if data[confKey.storagePool] != "default" { + if data[confKey.storagePool] != "" { t.Error("Unexpected storage pool value") } if data[confKey.volumeName] != "sio-vol" { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go index 3abebd6a3829..b3c2177b8df6 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume.go @@ -88,7 +88,7 @@ func (v *sioVolume) 
SetUpAt(dir string, fsGroup *int64) error { v.plugin.volumeMtx.LockKey(v.volSpecName) defer v.plugin.volumeMtx.UnlockKey(v.volSpecName) - glog.V(4).Info(log("setting up volume %s", v.volSpecName)) + glog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName)) if err := v.setSioMgr(); err != nil { glog.Error(log("setup failed to create scalio manager: %v", err)) return err @@ -104,18 +104,36 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { return nil } - // attach the volume and mount + // should multiple-mapping be enabled + enableMultiMaps := false + isROM := false + if v.spec.PersistentVolume != nil { + ams := v.spec.PersistentVolume.Spec.AccessModes + for _, am := range ams { + if am == api.ReadOnlyMany { + enableMultiMaps = true + isROM = true + } + } + } + glog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps)) + volName := v.volName - devicePath, err := v.sioMgr.AttachVolume(volName) + devicePath, err := v.sioMgr.AttachVolume(volName, enableMultiMaps) if err != nil { glog.Error(log("setup of volume %v: %v", v.volSpecName, err)) return err } options := []string{} - if v.source.ReadOnly { - options = append(options, "ro") - } else { + switch { + default: + options = append(options, "rw") + case isROM && !v.source.ReadOnly: options = append(options, "rw") + case isROM: + options = append(options, "ro") + case v.source.ReadOnly: + options = append(options, "ro") } glog.V(4).Info(log("mounting device %s -> %s", devicePath, dir)) @@ -140,7 +158,12 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { return err } - glog.V(4).Info(log("successfully setup volume %s attached %s:%s as %s", v.volSpecName, v.volName, devicePath, dir)) + if !v.readOnly && fsGroup != nil { + glog.V(4).Info(log("applying value FSGroup ownership")) + volume.SetVolumeOwnership(v, fsGroup) + } + + glog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir)) return nil } @@ 
-191,7 +214,7 @@ func (v *sioVolume) TearDownAt(dir string) error { // use "last attempt wins" strategy to detach volume from node // only allow volume to detach when it is not busy (not being used by other pods) if !deviceBusy { - glog.V(4).Info(log("teardown is attempting to detach/unmap volume for %s", v.volSpecName)) + glog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName)) if err := v.resetSioMgr(); err != nil { glog.Error(log("teardown failed, unable to reset scalio mgr: %v", err)) } @@ -224,7 +247,7 @@ func (v *sioVolume) Delete() error { return err } - glog.V(4).Info(log("successfully deleted pvc %s", v.volSpecName)) + glog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName)) return nil } @@ -234,17 +257,30 @@ func (v *sioVolume) Delete() error { var _ volume.Provisioner = &sioVolume{} func (v *sioVolume) Provision() (*api.PersistentVolume, error) { - glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVName)) + glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) } // setup volume attrributes - name := v.generateVolName() + genName := v.generateName("k8svol", 11) + var oneGig int64 = 1024 * 1024 * 1024 + var eightGig int64 = 8 * oneGig + capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] volSizeBytes := capacity.Value() - volSizeGB := int64(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) + volSizeGB := int64(volume.RoundUpSize(volSizeBytes, oneGig)) + + if volSizeBytes == 0 { + return nil, fmt.Errorf("invalid volume size of 0 specified") + } + + if volSizeBytes < eightGig { + volSizeGB = int64(volume.RoundUpSize(eightGig, oneGig)) + 
glog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) + + } // create sio manager if err := v.setSioMgrFromConfig(); err != nil { @@ -253,14 +289,15 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { } // create volume - vol, err := v.sioMgr.CreateVolume(name, volSizeGB) + volName := genName + vol, err := v.sioMgr.CreateVolume(volName, volSizeGB) if err != nil { glog.Error(log("provision failed while creating volume: %v", err)) return nil, err } // prepare data for pv - v.configData[confKey.volumeName] = name + v.configData[confKey.volumeName] = volName sslEnabled, err := strconv.ParseBool(v.configData[confKey.sslEnabled]) if err != nil { glog.Warning(log("failed to parse parameter sslEnabled, setting to false")) @@ -273,9 +310,10 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { } // describe created pv + pvName := genName pv := &api.PersistentVolume{ ObjectMeta: meta.ObjectMeta{ - Name: v.options.PVName, + Name: pvName, Namespace: v.options.PVC.Namespace, Labels: map[string]string{}, Annotations: map[string]string{ @@ -299,7 +337,7 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { ProtectionDomain: v.configData[confKey.protectionDomain], StoragePool: v.configData[confKey.storagePool], StorageMode: v.configData[confKey.storageMode], - VolumeName: name, + VolumeName: volName, FSType: v.configData[confKey.fsType], ReadOnly: readOnly, }, @@ -310,14 +348,14 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { pv.Spec.AccessModes = v.plugin.GetAccessModes() } - glog.V(4).Info(log("provisioner dynamically created pvc %v with volume %s successfully", pv.Name, vol.Name)) + glog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, vol.Name)) return pv, nil } // setSioMgr creates scaleio mgr from cached config data if found // otherwise, setups new config data and create mgr func (v *sioVolume) setSioMgr() error { - glog.V(4).Info(log("setting up sio mgr 
for vol %s", v.volSpecName)) + glog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName)) podDir := v.plugin.host.GetPodPluginDir(v.podUID, sioPluginName) configName := path.Join(podDir, sioConfigFileName) if v.sioMgr == nil { @@ -455,6 +493,6 @@ func (v *sioVolume) setSioMgrFromSpec() error { return nil } -func (v *sioVolume) generateVolName() string { - return "sio-" + strings.Replace(string(uuid.NewUUID()), "-", "", -1)[0:25] +func (v *sioVolume) generateName(prefix string, size int) string { + return fmt.Sprintf("%s-%s", prefix, strings.Replace(string(uuid.NewUUID()), "-", "", -1)[0:size]) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume_test.go b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume_test.go index 536a833304d8..6609922287bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/scaleio/sio_volume_test.go @@ -23,6 +23,7 @@ import ( "strings" "testing" + "github.com/golang/glog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" @@ -149,6 +150,7 @@ func TestVolumeMounterUnmounter(t *testing.T) { VolumeName: testSioVol, FSType: "ext4", SecretRef: &api.LocalObjectReference{Name: "sio-secret"}, + ReadOnly: false, }, }, } @@ -191,6 +193,10 @@ func TestVolumeMounterUnmounter(t *testing.T) { } } + if sio.isMultiMap { + t.Errorf("SetUp() - expecting multiple volume disabled by default") + } + // rebuild spec builtSpec, err := sioPlug.ConstructVolumeSpec(volume.NewSpecFromVolume(vol).Name(), path) if err != nil { @@ -235,25 +241,23 @@ func TestVolumeProvisioner(t *testing.T) { plug, err := plugMgr.FindPluginByName(sioPluginName) if err != nil { - t.Errorf("Can't find the plugin %v", sioPluginName) + t.Fatalf("Can't find the plugin %v", sioPluginName) } sioPlug, ok := plug.(*sioPlugin) if !ok { - t.Errorf("Cannot assert plugin to be type sioPlugin") + t.Fatal("Cannot assert plugin to be type 
sioPlugin") } options := volume.VolumeOptions{ ClusterName: "testcluster", - PVName: "pvc-sio-dynamic-vol", PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } + options.PVC.Name = "testpvc" options.PVC.Namespace = testns - // incomplete options, test should fail - _, err = sioPlug.NewProvisioner(options) - if err == nil { - t.Fatal("expected failure due to incomplete options") + options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{ + api.ReadOnlyMany, } options.Parameters = map[string]string{ @@ -288,10 +292,9 @@ func TestVolumeProvisioner(t *testing.T) { // validate provision actualSpecName := spec.Name actualVolName := spec.Spec.PersistentVolumeSource.ScaleIO.VolumeName - if !strings.HasPrefix(actualSpecName, "pvc-") { - t.Errorf("expecting volume name to start with pov-, got %s", actualSpecName) + if !strings.HasPrefix(actualSpecName, "k8svol-") { + t.Errorf("expecting volume name to start with k8svol-, got %s", actualSpecName) } - vol, err := sio.FindVolume(actualVolName) if err != nil { t.Fatalf("failed getting volume %v: %v", actualVolName, err) @@ -299,6 +302,9 @@ func TestVolumeProvisioner(t *testing.T) { if vol.Name != actualVolName { t.Errorf("expected volume name to be %s, got %s", actualVolName, vol.Name) } + if vol.SizeInKb != 8*1024*1024 { + glog.V(4).Info(log("unexpected volume size")) + } // mount dynamic vol sioMounter, err := sioPlug.NewMounter( @@ -315,8 +321,14 @@ func TestVolumeProvisioner(t *testing.T) { } sioVol.sioMgr.client = sio if err := sioMounter.SetUp(nil); err != nil { - t.Errorf("Expected success, got: %v", err) + t.Fatalf("Expected success, got: %v", err) + } + + // isMultiMap applied + if !sio.isMultiMap { + t.Errorf("SetUp() expecting attached volume with multi-mapping") } + // teardown dynamic vol sioUnmounter, err := sioPlug.NewUnmounter(spec.Name, podUID) if err != nil { @@ -351,3 +363,83 @@ func 
TestVolumeProvisioner(t *testing.T) { t.Errorf("Deleter did not delete path %v: %v", path, err) } } + +func TestVolumeProvisionerWithIncompleteConfig(t *testing.T) { + plugMgr, tmpDir := newPluginMgr(t) + defer os.RemoveAll(tmpDir) + + plug, err := plugMgr.FindPluginByName(sioPluginName) + if err != nil { + t.Fatalf("Can't find the plugin %v", sioPluginName) + } + sioPlug, ok := plug.(*sioPlugin) + if !ok { + t.Fatal("Cannot assert plugin to be type sioPlugin") + } + + options := volume.VolumeOptions{ + ClusterName: "testcluster", + PVName: "pvc-sio-dynamic-vol", + PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), + PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, + } + options.PVC.Namespace = testns + + options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + } + + // incomplete options, test should fail + _, err = sioPlug.NewProvisioner(options) + if err == nil { + t.Fatal("expected failure due to incomplete options") + } +} + +func TestVolumeProvisionerWithZeroCapacity(t *testing.T) { + plugMgr, tmpDir := newPluginMgr(t) + defer os.RemoveAll(tmpDir) + + plug, err := plugMgr.FindPluginByName(sioPluginName) + if err != nil { + t.Fatalf("Can't find the plugin %v", sioPluginName) + } + sioPlug, ok := plug.(*sioPlugin) + if !ok { + t.Fatal("Cannot assert plugin to be type sioPlugin") + } + + options := volume.VolumeOptions{ + ClusterName: "testcluster", + PVName: "pvc-sio-dynamic-vol", + PVC: volumetest.CreateTestPVC("0Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}), + PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, + } + options.PVC.Namespace = testns + + options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + } + + options.Parameters = map[string]string{ + confKey.gateway: "http://test.scaleio:11111", + confKey.system: "sio", + confKey.protectionDomain: testSioPD, + confKey.storagePool: "default", + 
confKey.secretRef: "sio-secret", + } + + provisioner, _ := sioPlug.NewProvisioner(options) + sio := newFakeSio() + sioVol := provisioner.(*sioVolume) + if err := sioVol.setSioMgrFromConfig(); err != nil { + t.Fatalf("failed to create scaleio mgr from config: %v", err) + } + sioVol.sioMgr.client = sio + + _, err = provisioner.Provision() + if err == nil { + t.Fatalf("call to Provision() should fail with invalid capacity") + } + +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index ddaade57f51b..c2ff2f56693b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -17,7 +17,6 @@ go_library( "doc.go", "fs.go", "io_util.go", - "metrics.go", "util.go", ], tags = ["automanaged"], @@ -28,7 +27,6 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/util/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go deleted file mode 100644 index 087bbfff4169..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -var storageOperationMetric = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "storage_operation_duration_seconds", - Help: "Storage operation duration", - }, - []string{"volume_plugin", "operation_name"}, -) - -var storageOperationErrorMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "storage_operation_errors_total", - Help: "Storage operation errors", - }, - []string{"volume_plugin", "operation_name"}, -) - -func init() { - registerMetrics() -} - -func registerMetrics() { - prometheus.MustRegister(storageOperationMetric) - prometheus.MustRegister(storageOperationErrorMetric) -} - -// OperationCompleteHook returns a hook to call when an operation is completed -func OperationCompleteHook(plugin, operationName string) func(error) { - requestTime := time.Now() - opComplete := func(err error) { - timeTaken := time.Since(requestTime).Seconds() - // Create metric with operation name and plugin name - if err != nil { - storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc() - } else { - storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken) - } - } - return opComplete -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go index a55ea70deca2..64d70900a37b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go 
+++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -55,7 +55,7 @@ type NestedPendingOperations interface { // concatenation of volumeName and podName is removed from the list of // executing operations allowing a new operation to be started with the // volumeName without error. - Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error, operationCompleteFunc func(error)) error + Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error // Wait blocks until all operations are completed. This is typically // necessary during tests - the test should wait until all operations finish @@ -94,8 +94,7 @@ type operation struct { func (grm *nestedPendingOperations) Run( volumeName v1.UniqueVolumeName, podName types.UniquePodName, - operationFunc func() error, - operationCompleteFunc func(error)) error { + operationFunc func() error) error { grm.lock.Lock() defer grm.lock.Unlock() opExists, previousOpIndex := grm.isOperationExists(volumeName, podName) @@ -133,7 +132,6 @@ func (grm *nestedPendingOperations) Run( defer k8sRuntime.HandleCrash() // Handle completion of and error, if any, from operationFunc() defer grm.operationComplete(volumeName, podName, &err) - defer operationCompleteFunc(err) // Handle panic, if any, from operationFunc() defer k8sRuntime.RecoverFromPanic(&err) return operationFunc() diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go index 19e0d62fe480..ce079407a63b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go @@ -50,7 +50,7 @@ func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) { operation := func() error { 
return nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation) // Assert if err != nil { @@ -66,8 +66,8 @@ func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) { operation := func() error { return nil } // Act - err1 := grm.Run(volume1Name, "" /* operationSubName */, operation, func(error) {}) - err2 := grm.Run(volume2Name, "" /* operationSubName */, operation, func(error) {}) + err1 := grm.Run(volume1Name, "" /* operationSubName */, operation) + err2 := grm.Run(volume2Name, "" /* operationSubName */, operation) // Assert if err1 != nil { @@ -88,8 +88,8 @@ func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) { operation := func() error { return nil } // Act - err1 := grm.Run(volumeName, operation1PodName, operation, func(error) {}) - err2 := grm.Run(volumeName, operation2PodName, operation, func(error) {}) + err1 := grm.Run(volumeName, operation1PodName, operation) + err2 := grm.Run(volumeName, operation2PodName, operation) // Assert if err1 != nil { @@ -108,7 +108,7 @@ func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) { operation := func() error { return nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation) // Assert if err != nil { @@ -122,7 +122,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -133,7 +133,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation2) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -154,7 +154,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -165,7 +165,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation2) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -185,7 +185,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -195,7 +195,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation2) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -215,7 +215,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -225,7 +225,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeLong), // Longer duration to accommodate for backoff func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation2) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -246,14 +246,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) // Assert if err2 == nil { @@ -271,14 +271,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) + err1 := grm.Run(volumeName, operationPodName, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) + err2 := grm.Run(volumeName, operationPodName, operation2) // Assert if err2 == nil { @@ -296,14 +296,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T) operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) + err1 := grm.Run(volumeName, operationPodName, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) + err2 := grm.Run(volumeName, operationPodName, operation2) // Assert if err2 == nil { @@ -320,14 +320,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) // Assert if err2 == nil { @@ -344,7 +344,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -352,7 +352,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) // Assert if err2 == nil { @@ -367,7 +367,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation3) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -388,7 +388,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -396,7 +396,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) // Assert if err2 == nil { @@ -411,7 +411,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation3) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -471,7 +471,7 @@ func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation1) if err != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err) } @@ -500,7 +500,7 @@ func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) + err := grm.Run(volumeName, "" /* operationSubName */, operation1) if err != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index da95adbba975..0c1569095f50 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -31,7 +31,6 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" @@ -536,32 +535,29 @@ func (oe *operationExecutor) IsOperationPending(volumeName v1.UniqueVolumeName, func (oe *operationExecutor) AttachVolume( volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - attachFunc, plugin, err := + attachFunc, err := oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_attach") return oe.pendingOperations.Run( - volumeToAttach.VolumeName, "" /* podName */, attachFunc, opCompleteFunc) + volumeToAttach.VolumeName, "" /* podName */, attachFunc) } func (oe *operationExecutor) DetachVolume( volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - detachFunc, plugin, err := + detachFunc, err := oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_detach") return oe.pendingOperations.Run( - volumeToDetach.VolumeName, "" /* podName */, detachFunc, opCompleteFunc) + volumeToDetach.VolumeName, "" /* podName */, detachFunc) } - func (oe 
*operationExecutor) VerifyVolumesAreAttached( attachedVolumes map[types.NodeName][]AttachedVolume, actualStateOfWorld ActualStateOfWorldAttacherUpdater) { @@ -634,11 +630,9 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( if err != nil { glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) } - - opCompleteFunc := util.OperationCompleteHook(pluginName, "verify_volumes_are_attached") // Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin uniquePluginName := v1.UniqueVolumeName(pluginName) - err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, bulkVerifyVolumeFunc, opCompleteFunc) + err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, bulkVerifyVolumeFunc) if err != nil { glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) } @@ -654,10 +648,8 @@ func (oe *operationExecutor) VerifyVolumesAreAttachedPerNode( if err != nil { return err } - - opCompleteFunc := util.OperationCompleteHook("", "verify_volumes_are_attached_per_node") // Give an empty UniqueVolumeName so that this operation could be executed concurrently. 
- return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, volumesAreAttachedFunc, opCompleteFunc) + return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, volumesAreAttachedFunc) } func (oe *operationExecutor) MountVolume( @@ -665,7 +657,7 @@ func (oe *operationExecutor) MountVolume( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error { - mountFunc, plugin, err := oe.operationGenerator.GenerateMountVolumeFunc( + mountFunc, err := oe.operationGenerator.GenerateMountVolumeFunc( waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) if err != nil { return err @@ -679,17 +671,15 @@ func (oe *operationExecutor) MountVolume( podName = volumehelper.GetUniquePodName(volumeToMount.Pod) } - // TODO mount_device - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_mount") return oe.pendingOperations.Run( - volumeToMount.VolumeName, podName, mountFunc, opCompleteFunc) + volumeToMount.VolumeName, podName, mountFunc) } func (oe *operationExecutor) UnmountVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - unmountFunc, plugin, err := + unmountFunc, err := oe.operationGenerator.GenerateUnmountVolumeFunc(volumeToUnmount, actualStateOfWorld) if err != nil { return err @@ -699,39 +689,36 @@ func (oe *operationExecutor) UnmountVolume( // same volume in parallel podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) - opCompleteFunc := util.OperationCompleteHook(plugin, "volume_unmount") return oe.pendingOperations.Run( - volumeToUnmount.VolumeName, podName, unmountFunc, opCompleteFunc) + volumeToUnmount.VolumeName, podName, unmountFunc) } func (oe *operationExecutor) UnmountDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - unmountDeviceFunc, plugin, err := + unmountDeviceFunc, err := oe.operationGenerator.GenerateUnmountDeviceFunc(deviceToDetach, 
actualStateOfWorld, mounter) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "unmount_device") return oe.pendingOperations.Run( - deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc, opCompleteFunc) + deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc) } func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - verifyControllerAttachedVolumeFunc, plugin, err := + verifyControllerAttachedVolumeFunc, err := oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld) if err != nil { return err } - opCompleteFunc := util.OperationCompleteHook(plugin, "verify_controller_attached_volume") return oe.pendingOperations.Run( - volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc, opCompleteFunc) + volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc) } // TODO: this is a workaround for the unmount device issue caused by gci mounter. 
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go index 941b7bf88c45..b312b29d451b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -239,29 +239,29 @@ func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) Opera } } -func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } -func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } -func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } -func 
(fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { @@ -269,17 +269,17 @@ func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolum return nil }, nil } -func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } -func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { +func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, "", nil + }, nil } func (fopg *fakeOperationGenerator) GenerateBulkVolumeVerifyFunc( diff --git 
a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index b97ae9630f00..9e81eed67b66 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -73,25 +73,25 @@ func NewOperationGenerator(kubeClient clientset.Interface, // OperationGenerator interface that extracts out the functions from operation_executor to make it dependency injectable type OperationGenerator interface { // Generates the MountVolume function needed to perform the mount of a volume plugin - GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) + GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, error) // Generates the UnmountVolume function needed to perform the unmount of a volume plugin - GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) + GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) // Generates the AttachVolume function needed to perform attach of a volume plugin - GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) // Generates the DetachVolume function needed to perform the detach of a volume plugin - GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, 
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) // Generates the VolumesAreAttached function needed to verify if volume plugins are attached GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) // Generates the UnMountDevice function needed to perform the unmount of a device - GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) + GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, error) // Generates the function needed to check if the attach_detach controller has attached the volume plugin - GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) + GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) // GetVolumePluginMgr returns volume plugin manager GetVolumePluginMgr() *volume.VolumePluginMgr @@ -245,17 +245,17 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( func (og *operationGenerator) GenerateAttachVolumeFunc( volumeToAttach VolumeToAttach, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, 
"", volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) + return nil, volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) } volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - return nil, attachableVolumePlugin.GetPluginName(), volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) + return nil, volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) } return func() error { @@ -283,7 +283,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } return nil - }, attachableVolumePlugin.GetPluginName(), nil + }, nil } func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { @@ -293,10 +293,9 @@ func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach AttachedVolume, verifySafeToDetach bool, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { var volumeName string var attachableVolumePlugin volume.AttachableVolumePlugin - var pluginName string var err error if volumeToDetach.VolumeSpec != nil { @@ -304,35 +303,31 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, "", volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } volumeName, err = attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec) if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName 
failed", err) + return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err) } } else { + var pluginName string // Get attacher plugin and the volumeName by splitting the volume unique name in case // there's no VolumeSpec: this happens only on attach/detach controller crash recovery // when a pod has been deleted during the controller downtime pluginName, volumeName, err = volumehelper.SplitUniqueName(volumeToDetach.VolumeName) if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) + return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } } - - if pluginName == "" { - pluginName = attachableVolumePlugin.GetPluginName() - } - volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) + return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) } return func() error { @@ -357,24 +352,24 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach.VolumeName, volumeToDetach.NodeName) return nil - }, pluginName, nil + }, nil } func (og *operationGenerator) GenerateMountVolumeFunc( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, - isRemount bool) (func() error, string, error) { + isRemount bool) (func() error, error) { // Get mounter plugin volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err != nil || volumePlugin == nil { - return nil, "", 
volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) + return nil, volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) } affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin) if affinityErr != nil { - return nil, volumePlugin.GetPluginName(), affinityErr + return nil, affinityErr } volumeMounter, newMounterErr := volumePlugin.NewMounter( @@ -384,13 +379,13 @@ func (og *operationGenerator) GenerateMountVolumeFunc( if newMounterErr != nil { eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return nil, volumePlugin.GetPluginName(), detailedErr + return nil, detailedErr } mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin) if mountCheckError != nil { - return nil, volumePlugin.GetPluginName(), mountCheckError + return nil, mountCheckError } // Get attacher, if possible @@ -494,23 +489,23 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } return nil - }, volumePlugin.GetPluginName(), nil + }, nil } func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount MountedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { + actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) { // Get mountable plugin volumePlugin, err := og.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName) if err != nil || volumePlugin == nil { - return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) + return nil, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) } volumeUnmounter, newUnmounterErr := volumePlugin.NewUnmounter( volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID) if newUnmounterErr != nil { - return nil, volumePlugin.GetPluginName(), 
volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) + return nil, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) } return func() error { @@ -540,28 +535,28 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( } return nil - }, volumePlugin.GetPluginName(), nil + }, nil } func (og *operationGenerator) GenerateUnmountDeviceFunc( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, - mounter mount.Interface) (func() error, string, error) { + mounter mount.Interface) (func() error, error) { // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(deviceToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, "", deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) + return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) } volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) + return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) } volumeAttacher, err := attachableVolumePlugin.NewAttacher() if err != nil { - return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) + return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) } return func() error { @@ -621,19 +616,13 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( } return nil - }, attachableVolumePlugin.GetPluginName(), nil + }, nil } func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( volumeToMount VolumeToMount, nodeName types.NodeName, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, 
error) { - volumePlugin, err := - og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) - if err != nil || volumePlugin == nil { - return nil, "", volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.FindPluginBySpec failed", err) - } - + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { if !volumeToMount.PluginIsAttachable { // If the volume does not implement the attacher interface, it is @@ -689,7 +678,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( // Volume not attached, return error. Caller will log and retry. return volumeToMount.GenerateErrorDetailed("Volume not attached according to node status", nil) - }, volumePlugin.GetPluginName(), nil + }, nil } func (og *operationGenerator) verifyVolumeIsSafeToDetach( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index a9ab2845a70e..660c3c9db8a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -72,6 +72,15 @@ func SetReady(dir string) { // UnmountPath is a common unmount routine that unmounts the given path and // deletes the remaining directory if successful. func UnmountPath(mountPath string, mounter mount.Interface) error { + return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) +} + +// UnmountMountPoint is a common unmount routine that unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts. 
+func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { if pathExists, pathErr := PathExists(mountPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { @@ -79,16 +88,26 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { return nil } - notMnt, err := mounter.IsLikelyNotMountPoint(mountPath) + var notMnt bool + var err error + + if extensiveMountPointCheck { + notMnt, err = mount.IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } + if err != nil { return err } + if notMnt { glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) return os.Remove(mountPath) } // Unmount the mount path + glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) if err := mounter.Unmount(mountPath); err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD index f8b14ee14929..4bf848152f41 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/pod:go_default_library", + "//pkg/apis/policy:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", @@ -32,6 +33,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", + "//pkg/apis/policy:go_default_library", "//pkg/auth/nodeidentifier:go_default_library", "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", diff --git 
a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go index 1779cf57a385..e02d1a8617e3 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go @@ -25,6 +25,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/kubernetes/pkg/api" podutil "k8s.io/kubernetes/pkg/api/pod" + "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/auth/nodeidentifier" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" @@ -102,6 +103,8 @@ func (c *nodePlugin) Admit(a admission.Attributes) error { return c.admitPod(nodeName, a) case "status": return c.admitPodStatus(nodeName, a) + case "eviction": + return c.admitPodEviction(nodeName, a) default: return admission.NewForbidden(a, fmt.Errorf("unexpected pod subresource %s", a.GetSubresource())) } @@ -161,6 +164,9 @@ func (c *nodePlugin) admitPod(nodeName string, a admission.Attributes) error { if errors.IsNotFound(err) { // wasn't found in the server cache, do a live lookup before forbidding existingPod, err = c.podsGetter.Pods(a.GetNamespace()).Get(a.GetName(), v1.GetOptions{}) + if errors.IsNotFound(err) { + return err + } } if err != nil { return admission.NewForbidden(a, err) @@ -195,6 +201,45 @@ func (c *nodePlugin) admitPodStatus(nodeName string, a admission.Attributes) err } } +func (c *nodePlugin) admitPodEviction(nodeName string, a admission.Attributes) error { + switch a.GetOperation() { + case admission.Create: + // require eviction to an existing pod object + eviction, ok := a.GetObject().(*policy.Eviction) + if !ok { + return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject())) + } + // use pod name from the admission attributes, if set, rather than from the 
submitted Eviction object + podName := a.GetName() + if len(podName) == 0 { + if len(eviction.Name) == 0 { + return admission.NewForbidden(a, fmt.Errorf("could not determine pod from request data")) + } + podName = eviction.Name + } + // get the existing pod from the server cache + existingPod, err := c.podsGetter.Pods(a.GetNamespace()).Get(podName, v1.GetOptions{ResourceVersion: "0"}) + if errors.IsNotFound(err) { + // wasn't found in the server cache, do a live lookup before forbidding + existingPod, err = c.podsGetter.Pods(a.GetNamespace()).Get(podName, v1.GetOptions{}) + if errors.IsNotFound(err) { + return err + } + } + if err != nil { + return admission.NewForbidden(a, err) + } + // only allow a node to evict a pod bound to itself + if existingPod.Spec.NodeName != nodeName { + return admission.NewForbidden(a, fmt.Errorf("node %s can only evict pods with spec.nodeName set to itself", nodeName)) + } + return nil + + default: + return admission.NewForbidden(a, fmt.Errorf("unexpected operation %s", a.GetOperation())) + } +} + func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error { requestedName := a.GetName() diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go index 9dbc8388703d..25f263cf9720 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go @@ -24,6 +24,8 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/policy" + policyapi "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/auth/nodeidentifier" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" @@ -40,6 
+42,12 @@ func makeTestPod(namespace, name, node string, mirror bool) *api.Pod { return pod } +func makeTestPodEviction(name string) *policy.Eviction { + eviction := &policy.Eviction{} + eviction.Name = name + return eviction +} + func Test_nodePlugin_Admit(t *testing.T) { var ( mynode = &user.DefaultInfo{Name: "system:node:mynode", Groups: []string{"system:nodes"}} @@ -54,12 +62,22 @@ func Test_nodePlugin_Admit(t *testing.T) { mypod = makeTestPod("ns", "mypod", "mynode", false) otherpod = makeTestPod("ns", "otherpod", "othernode", false) unboundpod = makeTestPod("ns", "unboundpod", "", false) + unnamedpod = makeTestPod("ns", "", "mynode", false) + + mymirrorpodEviction = makeTestPodEviction("mymirrorpod") + othermirrorpodEviction = makeTestPodEviction("othermirrorpod") + unboundmirrorpodEviction = makeTestPodEviction("unboundmirrorpod") + mypodEviction = makeTestPodEviction("mypod") + otherpodEviction = makeTestPodEviction("otherpod") + unboundpodEviction = makeTestPodEviction("unboundpod") + unnamedEviction = makeTestPodEviction("") configmapResource = api.Resource("configmap").WithVersion("v1") configmapKind = api.Kind("ConfigMap").WithVersion("v1") - podResource = api.Resource("pods").WithVersion("v1") - podKind = api.Kind("Pod").WithVersion("v1") + podResource = api.Resource("pods").WithVersion("v1") + podKind = api.Kind("Pod").WithVersion("v1") + evictionKind = policyapi.Kind("Eviction").WithVersion("v1beta1") nodeResource = api.Resource("nodes").WithVersion("v1") nodeKind = api.Kind("Node").WithVersion("v1") @@ -123,6 +141,30 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, mymirrorpod.Namespace, mymirrorpod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "allow create of eviction for mirror pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, 
mymirrorpod.Namespace, mymirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "", + }, + { + name: "forbid update of eviction for mirror pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, mymirrorpod.Namespace, mymirrorpod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for mirror pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mymirrorpodEviction, nil, evictionKind, mymirrorpod.Namespace, mymirrorpod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "allow create of unnamed eviction for mirror pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, mymirrorpod.Namespace, mymirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "", + }, // Mirror pods bound to another node { @@ -161,6 +203,30 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, othermirrorpod.Namespace, othermirrorpod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "forbid create of eviction for mirror pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, othermirrorpod.Namespace, othermirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, + { + name: "forbid update of eviction for mirror pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, othermirrorpod.Namespace, othermirrorpod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected 
operation", + }, + { + name: "forbid delete of eviction for mirror pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(othermirrorpodEviction, nil, evictionKind, othermirrorpod.Namespace, othermirrorpod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create of unnamed eviction for mirror pod to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, othermirrorpod.Namespace, othermirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, // Mirror pods not bound to any node { @@ -199,6 +265,30 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, unboundmirrorpod.Namespace, unboundmirrorpod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "forbid create of eviction for mirror pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, unboundmirrorpod.Namespace, unboundmirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, + { + name: "forbid update of eviction for mirror pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, unboundmirrorpod.Namespace, unboundmirrorpod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for mirror pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundmirrorpodEviction, nil, evictionKind, unboundmirrorpod.Namespace, unboundmirrorpod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create 
of unnamed eviction for mirror pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, unboundmirrorpod.Namespace, unboundmirrorpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, // Normal pods bound to us { @@ -237,6 +327,24 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, mypod.Namespace, mypod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "forbid update of eviction for normal pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for normal pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "allow create of unnamed eviction for normal pod bound to self", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Create, mynode), + err: "", + }, // Normal pods bound to another { @@ -275,6 +383,30 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, otherpod.Namespace, otherpod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "forbid create of eviction for normal pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, 
"eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, + { + name: "forbid update of eviction for normal pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for normal pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(otherpodEviction, nil, evictionKind, otherpodEviction.Namespace, otherpodEviction.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create of eviction for normal pod bound to another", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, otherpod.Namespace, otherpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, // Normal pods not bound to any node { @@ -313,6 +445,30 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, unboundpod.Namespace, unboundpod.Name, podResource, "status", admission.Delete, mynode), err: "forbidden: unexpected operation", }, + { + name: "forbid create of eviction for normal pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, unboundpod.Namespace, unboundpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, + { + name: "forbid update of eviction for normal pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, unboundpod.Namespace, unboundpod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid 
delete of eviction for normal pod unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unboundpodEviction, nil, evictionKind, unboundpod.Namespace, unboundpod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create of unnamed eviction for normal unbound", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, unboundpod.Namespace, unboundpod.Name, podResource, "eviction", admission.Create, mynode), + err: "spec.nodeName set to itself", + }, // Missing pod { @@ -321,6 +477,57 @@ func Test_nodePlugin_Admit(t *testing.T) { attributes: admission.NewAttributesRecord(nil, nil, podKind, unboundpod.Namespace, unboundpod.Name, podResource, "", admission.Delete, mynode), err: "not found", }, + { + name: "forbid create of eviction for unknown pod", + podsGetter: noExistingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Create, mynode), + err: "not found", + }, + { + name: "forbid update of eviction for unknown pod", + podsGetter: noExistingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for unknown pod", + podsGetter: noExistingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create of unnamed eviction for unknown pod", + podsGetter: noExistingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, mypod.Namespace, mypod.Name, podResource, "eviction", admission.Create, mynode), + err: "not found", + }, + + // 
Eviction for unnamed pod + { + name: "allow create of eviction for unnamed pod", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, unnamedpod.Namespace, unnamedpod.Name, podResource, "eviction", admission.Create, mynode), + // use the submitted eviction resource name as the pod name + err: "", + }, + { + name: "forbid update of eviction for unnamed pod", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, unnamedpod.Namespace, unnamedpod.Name, podResource, "eviction", admission.Update, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid delete of eviction for unnamed pod", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(mypodEviction, nil, evictionKind, unnamedpod.Namespace, unnamedpod.Name, podResource, "eviction", admission.Delete, mynode), + err: "forbidden: unexpected operation", + }, + { + name: "forbid create of unnamed eviction for unnamed pod", + podsGetter: existingPods, + attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, unnamedpod.Namespace, unnamedpod.Name, podResource, "eviction", admission.Create, mynode), + err: "could not determine pod from request data", + }, // Resource pods { diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 4c9cd35296e9..b2a5d284648f 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -113,6 +113,9 @@ func NodeRules() []rbac.PolicyRule { // Needed for the node to report status of pods it is running. // Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself. 
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), + // Needed for the node to create pod evictions. + // Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself. + rbac.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs // Needed for configmap volume and envs diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index d5c47f0fa6dc..bb1205bb9da4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -678,6 +678,12 @@ items: - pods/status verbs: - update + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create - apiGroups: - "" resources: diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 06deb3014977..084f5c307803 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -521,8 +521,7 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { // Account for storage requested by emptydir volumes // If the storage medium is memory, should exclude the size for _, vol := range pod.Spec.Volumes { - if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { - + if vol.EmptyDir != nil && vol.EmptyDir.SizeLimit != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { result.StorageScratch += vol.EmptyDir.SizeLimit.Value() } } @@ -1233,15 +1232,26 @@ func (c 
*PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node } func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { + // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. + return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute + }) +} + +// PodToleratesNodeNoExecuteTaints checks if a pod tolertaions can tolerate the node's NoExecute taints +func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool { + return t.Effect == v1.TaintEffectNoExecute + }) +} + +func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo, filter func(t *v1.Taint) bool) (bool, []algorithm.PredicateFailureReason, error) { taints, err := nodeInfo.Taints() if err != nil { return false, nil, err } - if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool { - // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. 
- return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute - }) { + if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index a410641e1976..ab26a8062ff4 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -81,24 +81,24 @@ var ( func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.NodeResources { return v1.NodeResources{ Capacity: v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), - v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), - v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), - v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), - opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), - v1.ResourceStorage: *resource.NewQuantity(storage, resource.BinarySI), + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), + opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), + v1.ResourceStorageScratch: *resource.NewQuantity(storage, resource.BinarySI), }, } } func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.ResourceList { return v1.ResourceList{ - v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), - v1.ResourceMemory: 
*resource.NewQuantity(memory, resource.BinarySI), - v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), - v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), - opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), - v1.ResourceStorage: *resource.NewQuantity(storage, resource.BinarySI), + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), + opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), + v1.ResourceStorageScratch: *resource.NewQuantity(storage, resource.BinarySI), } } @@ -125,7 +125,7 @@ func addStorageLimit(pod *v1.Pod, sizeLimit int64, medium v1.StorageMedium) *v1. Name: "emptyDirVolumeName", VolumeSource: v1.VolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{ - SizeLimit: *resource.NewQuantity(sizeLimit, resource.BinarySI), + SizeLimit: resource.NewQuantity(sizeLimit, resource.BinarySI), Medium: medium, }, }, diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go index f13f2f5c9e36..2c91cbb58e34 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler.go @@ -36,8 +36,6 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" "k8s.io/kubernetes/plugin/pkg/scheduler/util" - "fmt" - "github.com/golang/glog" ) @@ -186,21 +184,29 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { } // assume signals to the cache that a pod is already in the cache, so that binding can be asnychronous. -func (sched *Scheduler) assume(pod *v1.Pod, host string) error { +// assume modifies `assumed`. 
+func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // Optimistically assume that the binding will succeed and send it to apiserver // in the background. // If the binding fails, scheduler will release resources allocated to assumed pod // immediately. - assumed := *pod assumed.Spec.NodeName = host - if err := sched.config.SchedulerCache.AssumePod(&assumed); err != nil { + if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil { glog.Errorf("scheduler cache AssumePod failed: %v", err) - // TODO: This means that a given pod is already in cache (which means it - // is either assumed or already added). This is most probably result of a - // BUG in retrying logic. As a temporary workaround (which doesn't fully - // fix the problem, but should reduce its impact), we simply return here, - // as binding doesn't make sense anyway. - // This should be fixed properly though. + + // This is most probably result of a BUG in retrying logic. + // We report an error here so that pod scheduling can be retried. + // This relies on the fact that Error will check if the pod has been bound + // to a node and if so will not add it back to the unscheduled pods queue + // (otherwise this would cause an infinite loop). + sched.config.Error(assumed, err) + sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePod failed: %v", err) + sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: "SchedulerError", + Message: err.Error(), + }) return err } @@ -208,7 +214,7 @@ func (sched *Scheduler) assume(pod *v1.Pod, host string) error { // predicates in equivalence cache. // If the binding fails, these invalidated item will not break anything. 
if sched.config.Ecache != nil { - sched.config.Ecache.InvalidateCachedPredicateItemForPodAdd(pod, host) + sched.config.Ecache.InvalidateCachedPredicateItemForPodAdd(assumed, host) } return nil } @@ -221,12 +227,12 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { // it's atomic with setting host. err := sched.config.Binder.Bind(b) if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil { - return fmt.Errorf("scheduler cache FinishBinding failed: %v", err) + glog.Errorf("scheduler cache FinishBinding failed: %v", err) } if err != nil { glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name) if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil { - return fmt.Errorf("scheduler cache ForgetPod failed: %v", err) + glog.Errorf("scheduler cache ForgetPod failed: %v", err) } sched.config.Error(assumed, err) sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err) @@ -237,6 +243,7 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { }) return err } + metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", assumed.Name, b.Target.Name) return nil @@ -263,15 +270,17 @@ func (sched *Scheduler) scheduleOne() { // Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet. // This allows us to keep scheduling without waiting on binding to occur. - err = sched.assume(pod, suggestedHost) + assumedPod := *pod + // assume modifies `assumedPod` by setting NodeName=suggestedHost + err = sched.assume(&assumedPod, suggestedHost) if err != nil { return } // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). 
go func() { - err := sched.bind(pod, &v1.Binding{ - ObjectMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name, UID: pod.UID}, + err := sched.bind(&assumedPod, &v1.Binding{ + ObjectMeta: metav1.ObjectMeta{Namespace: assumedPod.Namespace, Name: assumedPod.Name, UID: assumedPod.UID}, Target: v1.ObjectReference{ Kind: "Node", Name: suggestedHost, diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler_test.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler_test.go index e9c4c21fd9e3..3245451017bb 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler_test.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/scheduler_test.go @@ -116,6 +116,7 @@ func TestScheduler(t *testing.T) { sendPod *v1.Pod algo algorithm.ScheduleAlgorithm expectErrorPod *v1.Pod + expectForgetPod *v1.Pod expectAssumedPod *v1.Pod expectError error expectBind *v1.Binding @@ -140,7 +141,8 @@ func TestScheduler(t *testing.T) { expectAssumedPod: podWithID("foo", testNode.Name), injectBindError: errB, expectError: errB, - expectErrorPod: podWithID("foo", ""), + expectErrorPod: podWithID("foo", testNode.Name), + expectForgetPod: podWithID("foo", testNode.Name), eventReason: "FailedScheduling", }, { sendPod: deletingPod("foo"), @@ -152,11 +154,15 @@ func TestScheduler(t *testing.T) { for i, item := range table { var gotError error var gotPod *v1.Pod + var gotForgetPod *v1.Pod var gotAssumedPod *v1.Pod var gotBinding *v1.Binding configurator := &FakeConfigurator{ Config: &Config{ SchedulerCache: &schedulertesting.FakeCache{ + ForgetFunc: func(pod *v1.Pod) { + gotForgetPod = pod + }, AssumeFunc: func(pod *v1.Pod) { gotAssumedPod = pod }, @@ -197,6 +203,9 @@ func TestScheduler(t *testing.T) { if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) { t.Errorf("%v: error pod: wanted %v, got %v", i, e, a) } + if e, a := item.expectForgetPod, gotForgetPod; !reflect.DeepEqual(e, a) { + t.Errorf("%v: forget pod: wanted %v, got %v", i, e, a) + } if e, a := 
item.expectError, gotError; !reflect.DeepEqual(e, a) { t.Errorf("%v: error: wanted %v, got %v", i, e, a) } diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go index 4717b1d8e8c1..5b56943e2636 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/node_info.go @@ -80,6 +80,7 @@ func (r *Resource) ResourceList() v1.ResourceList { v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI), v1.ResourceStorageOverlay: *resource.NewQuantity(r.StorageOverlay, resource.BinarySI), + v1.ResourceStorageScratch: *resource.NewQuantity(r.StorageScratch, resource.BinarySI), } for rName, rQuant := range r.OpaqueIntResources { result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) @@ -372,7 +373,7 @@ func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int6 // Account for storage requested by emptydir volumes // If the storage medium is memory, should exclude the size for _, vol := range pod.Spec.Volumes { - if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { + if vol.EmptyDir != nil && vol.EmptyDir.SizeLimit != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory { res.StorageScratch += vol.EmptyDir.SizeLimit.Value() } } @@ -407,7 +408,7 @@ func (n *NodeInfo) SetNode(node *v1.Node) error { n.allocatableResource.NvidiaGPU = rQuant.Value() case v1.ResourcePods: n.allowedPodNumber = int(rQuant.Value()) - case v1.ResourceStorage: + case v1.ResourceStorageScratch: n.allocatableResource.StorageScratch = rQuant.Value() case v1.ResourceStorageOverlay: n.allocatableResource.StorageOverlay = rQuant.Value() diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testing/fake_cache.go 
b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testing/fake_cache.go index 1e302d3c24d8..068ac9ce1bb3 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testing/fake_cache.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/testing/fake_cache.go @@ -25,6 +25,7 @@ import ( // FakeCache is used for testing type FakeCache struct { AssumeFunc func(*v1.Pod) + ForgetFunc func(*v1.Pod) } func (f *FakeCache) AssumePod(pod *v1.Pod) error { @@ -34,7 +35,10 @@ func (f *FakeCache) AssumePod(pod *v1.Pod) error { func (f *FakeCache) FinishBinding(pod *v1.Pod) error { return nil } -func (f *FakeCache) ForgetPod(pod *v1.Pod) error { return nil } +func (f *FakeCache) ForgetPod(pod *v1.Pod) error { + f.ForgetFunc(pod) + return nil +} func (f *FakeCache) AddPod(pod *v1.Pod) error { return nil } diff --git a/vendor/k8s.io/kubernetes/staging/prime-apimachinery.sh b/vendor/k8s.io/kubernetes/staging/prime-apimachinery.sh index c9d4e96fb11e..b9e809e69e49 100755 --- a/vendor/k8s.io/kubernetes/staging/prime-apimachinery.sh +++ b/vendor/k8s.io/kubernetes/staging/prime-apimachinery.sh @@ -77,5 +77,4 @@ git checkout vendor/k8s.io/code-generator/cmd/set-gen/main.go # now run gofmt to get the sorting right echo "running gofmt" -gofmt -s -w ${KUBE_ROOT}/cmd ${KUBE_ROOT}/examples ${KUBE_ROOT}/federation ${KUBE_ROOT}/pkg ${KUBE_ROOT}/plugin ${KUBE_ROOT}/test - +gofmt -s -w ${KUBE_ROOT}/cmd ${KUBE_ROOT}/examples ${KUBE_ROOT}/federation ${KUBE_ROOT}/pkg ${KUBE_ROOT}/plugin ${KUBE_ROOT}/test diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md index 4468e616ba78..fe7f2687ba5d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/README.md @@ -23,7 +23,7 @@ These act like most other 
Resources in Kubernetes, and may be `kubectl apply`'d, Some example use cases: * Provisioning/Management of external datastores/databases (eg. CloudSQL/RDS instances) -* Higher level abstractions around Kubernetes primitives (eg. a single Resource to define an etcd cluster, backed by a Service and a ReplicationController) +* Higher level abstractions around Kubernetes primitives (eg. a single Resource to define an etcd cluster, backed by a Service and a ReplicationController) ## Defining types diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index cf60827ae7d7..feb97447f247 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -343,8 +343,17 @@ func (r *crdHandler) getServingInfoFor(crd *apiextensions.CustomResourceDefiniti storage: storage, requestScope: requestScope, } - storageMap[crd.UID] = ret - r.customStorage.Store(storageMap) + + storageMap2 := make(crdStorageMap, len(storageMap)) + + // Copy because we cannot write to storageMap without a race + // as it is used without locking elsewhere + for k, v := range storageMap { + storageMap2[k] = v + } + + storageMap2[crd.UID] = ret + r.customStorage.Store(storageMap2) return ret } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 6c4a3305cb7c..6fac96be4079 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ 
-// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index e38977b52d79..54ce6ad59e01 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 828df44351b6..8884c738ed93 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1138,7 +1138,7 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty return err } case !foundOriginal && !foundPatch: - return nil + continue } // Split all items into patch items and server-only items and then enforce the order. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go index 507c8cffa17e..7f6372db6ad5 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go @@ -5966,6 +5966,75 @@ retainKeysMergingList: retainKeysMergingList: - name: bar - name: foo +`), + }, + }, + { + Description: "delete and reorder in one list, reorder in another", + StrategicMergePatchRawTestCaseData: StrategicMergePatchRawTestCaseData{ + Original: []byte(` +mergingList: +- name: a + value: a +- name: b + value: b +mergeItemPtr: +- name: c + value: c +- name: d + value: d +`), + Current: []byte(` +mergingList: +- name: a + value: a +- name: b + value: b +mergeItemPtr: +- name: c + value: c +- name: d + value: d +`), + Modified: []byte(` +mergingList: +- name: b + value: b +mergeItemPtr: +- name: d + value: d +- name: c + value: c +`), + TwoWay: []byte(` +$setElementOrder/mergingList: +- name: b +$setElementOrder/mergeItemPtr: +- name: d +- name: c +mergingList: +- $patch: delete + name: a +`), + ThreeWay: []byte(` 
+$setElementOrder/mergingList: +- name: b +$setElementOrder/mergeItemPtr: +- name: d +- name: c +mergingList: +- $patch: delete + name: a +`), + Result: []byte(` +mergingList: +- name: b + value: b +mergeItemPtr: +- name: d + value: d +- name: c + value: c `), }, }, @@ -5993,9 +6062,12 @@ func TestStrategicMergePatch(t *testing.T) { testThreeWayPatch(t, c) } - for _, c := range strategicMergePatchRawTestCases { - testTwoWayPatchForRawTestCase(t, c) - testThreeWayPatchForRawTestCase(t, c) + // run multiple times to exercise different map traversal orders + for i := 0; i < 10; i++ { + for _, c := range strategicMergePatchRawTestCases { + testTwoWayPatchForRawTestCase(t, c) + testThreeWayPatchForRawTestCase(t, c) + } } } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 7f6082d50e02..6801e750d5f9 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -252,23 +252,23 @@ }, { "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-semver/semver", @@ -416,31 +416,31 @@ }, { "ImportPath": 
"github.com/gophercloud/gophercloud", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "ce1e02c3ccfdb7fab257340dc4d603ec3035fa11" + "Rev": "ed590d9afe113c6107cd60717b196155e6579e78" }, { "ImportPath": "github.com/grpc-ecosystem/go-grpc-prometheus", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/BUILD new file mode 100644 index 000000000000..51ca5030cd1c --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/BUILD @@ -0,0 +1,38 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "initializer.go", + "interfaces.go", + ], + tags = ["automanaged"], + deps = [ + 
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_xtest", + srcs = ["initializer_test.go"], + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", + "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", + "//vendor/k8s.io/client-go/informers:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/initializer_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/initializer_test.go new file mode 100644 index 000000000000..03540d907ff3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/initializer/initializer_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package initializer_test + +import ( + "testing" + "time" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +// TestWantsAuthorizer ensures that the authorizer is injected +// when the WantsAuthorizer interface is implemented by a plugin. +func TestWantsAuthorizer(t *testing.T) { + target, err := initializer.New(nil, nil, &TestAuthorizer{}) + if err != nil { + t.Fatalf("expected to create an instance of initializer but got an error = %s", err.Error()) + } + wantAuthorizerAdmission := &WantAuthorizerAdmission{} + target.Initialize(wantAuthorizerAdmission) + if wantAuthorizerAdmission.auth == nil { + t.Errorf("expected authorizer to be initialized but found nil") + } +} + +// TestWantsExternalKubeClientSet ensures that the clienset is injected +// when the WantsExternalKubeClientSet interface is implemented by a plugin. +func TestWantsExternalKubeClientSet(t *testing.T) { + cs := &fake.Clientset{} + target, err := initializer.New(cs, nil, &TestAuthorizer{}) + if err != nil { + t.Fatalf("expected to create an instance of initializer but got an error = %s", err.Error()) + } + wantExternalKubeClientSet := &WantExternalKubeClientSet{} + target.Initialize(wantExternalKubeClientSet) + if wantExternalKubeClientSet.cs != cs { + t.Errorf("expected clientset to be initialized") + } +} + +// TestWantsExternalKubeInformerFactory ensures that the informer factory is injected +// when the WantsExternalKubeInformerFactory interface is implemented by a plugin. 
+func TestWantsExternalKubeInformerFactory(t *testing.T) { + cs := &fake.Clientset{} + sf := informers.NewSharedInformerFactory(cs, time.Duration(1)*time.Second) + target, err := initializer.New(cs, sf, &TestAuthorizer{}) + if err != nil { + t.Fatalf("expected to create an instance of initializer but got an error = %s", err.Error()) + } + wantExternalKubeInformerFactory := &WantExternalKubeInformerFactory{} + target.Initialize(wantExternalKubeInformerFactory) + if wantExternalKubeInformerFactory.sf != sf { + t.Errorf("expected informer factory to be initialized") + } +} + +// WantExternalKubeInformerFactory is a test stub that fulfills the WantsExternalKubeInformerFactory interface +type WantExternalKubeInformerFactory struct { + sf informers.SharedInformerFactory +} + +func (self *WantExternalKubeInformerFactory) SetExternalKubeInformerFactory(sf informers.SharedInformerFactory) { + self.sf = sf +} +func (self *WantExternalKubeInformerFactory) Admit(a admission.Attributes) error { return nil } +func (self *WantExternalKubeInformerFactory) Handles(o admission.Operation) bool { return false } +func (self *WantExternalKubeInformerFactory) Validate() error { return nil } + +var _ admission.Interface = &WantExternalKubeInformerFactory{} +var _ initializer.WantsExternalKubeInformerFactory = &WantExternalKubeInformerFactory{} + +// WantExternalKubeClientSet is a test stub that fulfills the WantsExternalKubeClientSet interface +type WantExternalKubeClientSet struct { + cs kubernetes.Interface +} + +func (self *WantExternalKubeClientSet) SetExternalKubeClientSet(cs kubernetes.Interface) { self.cs = cs } +func (self *WantExternalKubeClientSet) Admit(a admission.Attributes) error { return nil } +func (self *WantExternalKubeClientSet) Handles(o admission.Operation) bool { return false } +func (self *WantExternalKubeClientSet) Validate() error { return nil } + +var _ admission.Interface = &WantExternalKubeClientSet{} +var _ initializer.WantsExternalKubeClientSet = 
&WantExternalKubeClientSet{} + +// WantAuthorizerAdmission is a test stub that fulfills the WantsAuthorizer interface. +type WantAuthorizerAdmission struct { + auth authorizer.Authorizer +} + +func (self *WantAuthorizerAdmission) SetAuthorizer(a authorizer.Authorizer) { self.auth = a } +func (self *WantAuthorizerAdmission) Admit(a admission.Attributes) error { return nil } +func (self *WantAuthorizerAdmission) Handles(o admission.Operation) bool { return false } +func (self *WantAuthorizerAdmission) Validate() error { return nil } + +var _ admission.Interface = &WantAuthorizerAdmission{} +var _ initializer.WantsAuthorizer = &WantAuthorizerAdmission{} + +// TestAuthorizer is a test stub for testing that fulfills the authorizer interface. +type TestAuthorizer struct{} + +func (t *TestAuthorizer) Authorize(a authorizer.Attributes) (authorized bool, reason string, err error) { + return false, "", nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index 4e4e04697f32..d371a09cc81d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -222,19 +222,13 @@ func (l *lifecycle) Validate() error { // accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these // resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace. 
var accessReviewResources = map[schema.GroupResource]bool{ - {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "subjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "localsubjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "resourceaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "localresourceaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "selfsubjectrulesreviews"}: true, - schema.GroupResource{Group: "", Resource: "subjectrulesreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "subjectaccessreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "localsubjectaccessreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "resourceaccessreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "localresourceaccessreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "selfsubjectrulesreviews"}: true, - schema.GroupResource{Group: "authorization.openshift.io", Resource: "subjectrulesreviews"}: true, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "subjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "localsubjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "resourceaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "localresourceaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "selfsubjectrulesreviews"}: true, + schema.GroupResource{Group: "", Resource: "subjectrulesreviews"}: true, } func isAccessReview(a admission.Attributes) bool { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go index 7b2e2b064130..0fd9d7ab56c3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission_test.go @@ -135,6 +135,24 @@ func TestAdmissionNamespaceDoesNotExist(t *testing.T) { } t.Errorf("expected error returned from admission handler: %v", actions) } + + // verify create operations in the namespace cause an error + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Create, nil)) + if err == nil { + t.Errorf("Expected error rejecting creates in a namespace when it is missing") + } + + // verify update operations in the namespace cause an error + err = handler.Admit(admission.NewAttributesRecord(&pod, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Update, nil)) + if err == nil { + t.Errorf("Expected error rejecting updates in a namespace when it is missing") + } + + // verify delete operations in the namespace can proceed + err = handler.Admit(admission.NewAttributesRecord(nil, nil, v1.SchemeGroupVersion.WithKind("Pod").GroupKind().WithVersion("version"), pod.Namespace, pod.Name, v1.Resource("pods").WithVersion("version"), "", admission.Delete, nil)) + if err != nil { + t.Errorf("Unexpected error returned from admission handler: %v", err) + } } // TestAdmissionNamespaceActive verifies a resource is admitted when the namespace is active. 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto index 72075566c790..409db6da3eea 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/apis/example/v1/generated.proto @@ -209,4 +209,3 @@ message PodStatus { // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; } - diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/testdata/generate.sh b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/testdata/generate.sh index 07171057db58..260e649eba97 100755 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/testdata/generate.sh +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/authentication/request/x509/testdata/generate.sh @@ -21,4 +21,3 @@ cfssl sign -ca root.pem -ca-key root-key.pem -config intermediate.config.json in cfssl gencert -ca intermediate.pem -ca-key intermediate-key.pem -config client.config.json --profile=valid client.csr.json | cfssljson -bare client-valid cfssl gencert -ca intermediate.pem -ca-key intermediate-key.pem -config client.config.json --profile=expired client.csr.json | cfssljson -bare client-expired - diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index 080082732c18..c82f4f720de0 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -554,9 +554,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = 
*a.group.MetaGroupVersion } for _, action := range actions { - producedObject := storageMeta.ProducesObject(action.Verb) - if producedObject == nil { - producedObject = defaultVersionedObject + versionedObject := storageMeta.ProducesObject(action.Verb) + if versionedObject == nil { + versionedObject = defaultVersionedObject } reqScope.Namer = action.Namer @@ -589,6 +589,15 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag routes := []*restful.RouteBuilder{} + // If there is a subresource, kind should be the parent's kind. + if hasSubresource { + fqParentKind, err := a.getResourceKind(resource, a.group.Storage[resource]) + if err != nil { + return nil, err + } + kind = fqParentKind.Kind + } + verbOverrider, needOverride := storage.(StorageMetricsOverride) switch action.Verb { @@ -616,8 +625,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", producedObject). - Writes(producedObject) + Returns(http.StatusOK, "OK", versionedObject). + Writes(versionedObject) if isGetterWithOptions { if err := addObjectParams(ws, route, versionedGetOptions); err != nil { return nil, err @@ -673,9 +682,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", producedObject). - Reads(defaultVersionedObject). - Writes(producedObject) + Returns(http.StatusOK, "OK", versionedObject). + Reads(versionedObject). 
+ Writes(versionedObject) addParams(route, action.Params) routes = append(routes, route) case "PATCH": // Partially update a resource @@ -690,9 +699,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Consumes(string(types.JSONPatchType), string(types.MergePatchType), string(types.StrategicMergePatchType)). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", producedObject). + Returns(http.StatusOK, "OK", versionedObject). Reads(metav1.Patch{}). - Writes(producedObject) + Writes(versionedObject) addParams(route, action.Params) routes = append(routes, route) case "POST": // Create a resource. @@ -713,9 +722,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", producedObject). - Reads(defaultVersionedObject). - Writes(producedObject) + Returns(http.StatusOK, "OK", versionedObject). + Reads(versionedObject). + Writes(versionedObject) addParams(route, action.Params) routes = append(routes, route) case "DELETE": // Delete a resource. 
@@ -810,10 +819,6 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag routes = append(routes, buildProxyRoute(ws, "OPTIONS", a.prefix, action.Path, kind, resource, subresource, namespaced, requestScope, hasSubresource, action.Params, proxyHandler, operationSuffix)) case "CONNECT": for _, method := range connecter.ConnectMethods() { - connectProducedObject := storageMeta.ProducesObject(method) - if connectProducedObject == nil { - connectProducedObject = "string" - } doc := "connect " + method + " requests to " + kind if hasSubresource { doc = "connect " + method + " requests to " + subresource + " of " + kind @@ -825,7 +830,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). Produces("*/*"). Consumes("*/*"). - Writes(connectProducedObject) + Writes("string") if versionedConnectOptions != nil { if err := addObjectParams(ws, route, versionedConnectOptions); err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go index 899d4dff6d62..a057bae8377f 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go @@ -63,10 +63,6 @@ func GetOperationIDAndTags(r *restful.Route) (string, []string, error) { op := r.Operation path := r.Path var tags []string - // TODO: This is hacky, figure out where this name conflict is created and fix it at the root. 
- if strings.HasPrefix(path, "/apis/extensions/v1beta1/namespaces/{namespace}/") && strings.HasSuffix(op, "ScaleScale") { - op = op[:len(op)-10] + strings.Title(strings.Split(path[48:], "/")[0]) + "Scale" - } prefix, exists := verbs.GetPrefix(op) if !exists { return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index c193f01cc2a8..9554e5234996 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -17,8 +17,6 @@ limitations under the License. package registry import ( - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage" @@ -28,10 +26,11 @@ import ( ) // Creates a cacher based given storageConfig. 
-func StorageWithCacher(capacity int) generic.StorageDecorator { +func StorageWithCacher(defaultCapacity int) generic.StorageDecorator { return func( copier runtime.ObjectCopier, storageConfig *storagebackend.Config, + requestedSize *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), @@ -39,13 +38,15 @@ func StorageWithCacher(capacity int) generic.StorageDecorator { getAttrsFunc storage.AttrFunc, triggerFunc storage.TriggerPublisherFunc) (storage.Interface, factory.DestroyFunc) { - s, d := generic.NewRawStorage(storageConfig) - if capacity == 0 { - glog.V(5).Infof("Storage caching is disabled for %T", objectType) - return s, d + capacity := defaultCapacity + if requestedSize != nil && *requestedSize == 0 { + panic("StorageWithCacher must not be called with zero cache size") + } + if requestedSize != nil { + capacity = *requestedSize } - glog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) + s, d := generic.NewRawStorage(storageConfig) // TODO: we would change this later to make storage always have cacher and hide low level KV layer inside. // Currently it has two layers of same storage interface -- cacher and low level kv. cacherConfig := storage.CacherConfig{ diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index dc5c445cc5ab..10460a46c1a3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -167,6 +167,10 @@ type Store struct { Storage storage.Interface // Called to cleanup clients used by the underlying Storage; optional. DestroyFunc func() + // Maximum size of the watch history cached in memory, in number of entries. + // This value is ignored if Storage is non-nil. 
Nil is replaced with a default value. + // A zero integer will disable caching. + WatchCacheSize *int } // Note: the rest.StandardStorage interface aggregates the common REST verbs @@ -409,7 +413,7 @@ func (e *Store) WaitForInitialized(ctx genericapirequest.Context, obj runtime.Ob // shouldDeleteDuringUpdate checks if a Update is removing all the object's // finalizers. If so, it further checks if the object's -// DeletionGracePeriodSeconds is 0. +// DeletionGracePeriodSeconds is 0. If so, it returns true. func (e *Store) shouldDeleteDuringUpdate(ctx genericapirequest.Context, key string, obj, existing runtime.Object) bool { newMeta, err := meta.Accessor(obj) if err != nil { @@ -827,8 +831,8 @@ func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Con if err != nil { return nil, err } - needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(e, existingAccessor, options) - if needsUpdate { + shouldUpdate, newFinalizers := deletionFinalizersForGarbageCollection(e, existingAccessor, options) + if shouldUpdate { existingAccessor.SetFinalizers(newFinalizers) } @@ -1309,6 +1313,7 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { e.Storage, e.DestroyFunc = opts.Decorator( e.Copier, opts.StorageConfig, + e.WatchCacheSize, e.NewFunc(), prefix, keyFunc, diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go index e6ef8cb92498..6cc091b2379d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy.go @@ -64,11 +64,20 @@ type ErrorResponder interface { Error(err error) } +// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing +func normalizeLocation(location *url.URL) *url.URL { + normalized, _ := 
url.Parse(location.String()) + if len(normalized.Scheme) == 0 { + normalized.Scheme = "http" + } + return normalized +} + // NewUpgradeAwareProxyHandler creates a new proxy handler with a default flush interval. Responder is required for returning // errors to the caller. func NewUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareProxyHandler { return &UpgradeAwareProxyHandler{ - Location: location, + Location: normalizeLocation(location), Transport: transport, WrapTransport: wrapTransport, UpgradeRequired: upgradeRequired, @@ -79,9 +88,6 @@ func NewUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, // ServeHTTP handles the proxy request func (h *UpgradeAwareProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if len(h.Location.Scheme) == 0 { - h.Location.Scheme = "http" - } if h.tryUpgrade(w, req) { return } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go index 96ebed4d0b7b..ba61d30c7f12 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/proxy_test.go @@ -248,11 +248,7 @@ func TestServeHTTP(t *testing.T) { responder := &fakeResponder{t: t} backendURL, _ := url.Parse(backendServer.URL) backendURL.Path = test.requestPath - proxyHandler := &UpgradeAwareProxyHandler{ - Location: backendURL, - Responder: responder, - UpgradeRequired: test.upgradeRequired, - } + proxyHandler := NewUpgradeAwareProxyHandler(backendURL, nil, false, test.upgradeRequired, responder) proxyServer := httptest.NewServer(proxyHandler) defer proxyServer.Close() proxyURL, _ := url.Parse(proxyServer.URL) @@ -428,12 +424,8 @@ func TestProxyUpgrade(t *testing.T) { serverURL, 
_ := url.Parse(backendServer.URL) serverURL.Path = backendPath - proxyHandler := &UpgradeAwareProxyHandler{ - Location: serverURL, - Transport: tc.ProxyTransport, - InterceptRedirects: redirect, - Responder: &noErrorsAllowed{t: t}, - } + proxyHandler := NewUpgradeAwareProxyHandler(serverURL, tc.ProxyTransport, false, false, &noErrorsAllowed{t: t}) + proxyHandler.InterceptRedirects = redirect proxy := httptest.NewServer(proxyHandler) defer proxy.Close() @@ -479,14 +471,15 @@ func TestProxyUpgradeErrorResponse(t *testing.T) { return &fakeConn{err: expectedErr}, nil } responder = &fakeResponder{t: t, w: w} - proxyHandler := &UpgradeAwareProxyHandler{ - Location: &url.URL{ + proxyHandler := NewUpgradeAwareProxyHandler( + &url.URL{ Host: "fake-backend", }, - UpgradeRequired: true, - Responder: responder, - Transport: transport, - } + transport, + false, + true, + responder, + ) proxyHandler.ServeHTTP(w, r) })) defer proxy.Close() @@ -545,9 +538,7 @@ func TestDefaultProxyTransport(t *testing.T) { for _, test := range tests { locURL, _ := url.Parse(test.location) URL, _ := url.Parse(test.url) - h := UpgradeAwareProxyHandler{ - Location: locURL, - } + h := NewUpgradeAwareProxyHandler(locURL, nil, false, false, nil) result := h.defaultProxyTransport(URL, nil) transport := result.(*corsRemovingTransport).RoundTripper.(*proxy.Transport) if transport.Scheme != test.expectedScheme { @@ -721,11 +712,7 @@ func TestProxyRequestContentLengthAndTransferEncoding(t *testing.T) { responder := &fakeResponder{t: t} backendURL, _ := url.Parse(downstreamServer.URL) - proxyHandler := &UpgradeAwareProxyHandler{ - Location: backendURL, - Responder: responder, - UpgradeRequired: false, - } + proxyHandler := NewUpgradeAwareProxyHandler(backendURL, nil, false, false, responder) proxyServer := httptest.NewServer(proxyHandler) defer proxyServer.Close() diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 6c65230f35be..ab9aeb42adcf 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -26,9 +26,11 @@ import ( // StorageDecorator is a function signature for producing a storage.Interface // and an associated DestroyFunc from given parameters. +// A zero capacity means to disable caching, nil means to use a default. type StorageDecorator func( copier runtime.ObjectCopier, config *storagebackend.Config, + capacity *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), @@ -41,6 +43,7 @@ type StorageDecorator func( func UndecoratedStorage( copier runtime.ObjectCopier, config *storagebackend.Config, + capacity *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go index 78fa466c340d..f95807c74a3e 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -151,8 +151,11 @@ type Config struct { // RESTOptionsGetter is used to construct RESTStorage types via the generic registry. RESTOptionsGetter genericregistry.RESTOptionsGetter - // If specified, requests will be allocated a random timeout between this value, and twice this value. - // Note that it is up to the request handlers to ignore or honor this timeout. In seconds. + // If specified, all requests except those which match the LongRunningFunc predicate will timeout + // after this duration. 
+ RequestTimeout time.Duration + // If specified, long running requests such as watch will be allocated a random timeout between this value, and + // twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds. MinRequestTimeout int // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further // request has to wait. Applies only to non-mutating requests. @@ -221,6 +224,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config { EnableProfiling: true, MaxRequestsInFlight: 400, MaxMutatingRequestsInFlight: 200, + RequestTimeout: time.Duration(60) * time.Second, MinRequestTimeout: 1800, // Default to treating watch as a long-running operation @@ -483,7 +487,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { handler = genericapifilters.WithAuthentication(handler, c.RequestContextMapper, c.Authenticator, failedHandler) handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true") handler = genericfilters.WithPanicRecovery(handler) - handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc) + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver, c.RequestContextMapper) handler = apirequest.WithRequestContext(handler, c.RequestContextMapper) return handler diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go index a0208ed034df..182256c6ef70 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -27,6 +27,7 @@ import ( 
"k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server/httplog" "github.com/golang/glog" ) @@ -127,6 +128,10 @@ func WithMaxInFlightLimit( } func tooManyRequests(req *http.Request, w http.ResponseWriter) { + // "Too Many Requests" response is returned before logger is setup for the request. + // So we need to explicitly log it here. + defer httplog.NewLogged(req, &w).Log() + // Return a 429 status indicating "Too Many Requests" w.Header().Set("Retry-After", retryAfter) http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go index 6bc496bdbf22..5bf10d9fca55 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -31,12 +31,10 @@ import ( apirequest "k8s.io/apiserver/pkg/endpoints/request" ) -const globalTimeout = time.Minute - var errConnKilled = fmt.Errorf("kill connection/stream") -// WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by globalTimeout. -func WithTimeoutForNonLongRunningRequests(handler http.Handler, requestContextMapper apirequest.RequestContextMapper, longRunning apirequest.LongRunningRequestCheck) http.Handler { +// WithTimeoutForNonLongRunningRequests times out non-long-running requests after the time given by timeout. 
+func WithTimeoutForNonLongRunningRequests(handler http.Handler, requestContextMapper apirequest.RequestContextMapper, longRunning apirequest.LongRunningRequestCheck, timeout time.Duration) http.Handler { if longRunning == nil { return handler } @@ -45,13 +43,13 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, requestContextMa ctx, ok := requestContextMapper.Get(req) if !ok { // if this happens, the handler chain isn't setup correctly because there is no context mapper - return time.After(globalTimeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no context found for request during timeout")) + return time.After(timeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no context found for request during timeout")) } requestInfo, ok := apirequest.RequestInfoFrom(ctx) if !ok { // if this happens, the handler chain isn't setup correctly because there is no request info - return time.After(globalTimeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) + return time.After(timeout), func() {}, apierrors.NewInternalError(fmt.Errorf("no request info found for request during timeout")) } if longRunning(req, requestInfo) { @@ -72,7 +70,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, requestContextMa metrics.MonitorRequest(req, strings.ToUpper(requestInfo.Verb), "", requestInfo.Path, "", scope, http.StatusGatewayTimeout, 0, now) } } - return time.After(globalTimeout), metricFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", globalTimeout), 0) + return time.After(timeout), metricFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0) } return WithTimeout(handler, timeoutFunc) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index 410e813b11f7..f9d4370f717e 100644 --- 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -269,7 +269,7 @@ type preparedGenericAPIServer struct { // PrepareRun does post API installation setup steps. func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { if s.swaggerConfig != nil { - routes.Swagger{Config: s.swaggerConfig}.Install(s.SwaggerAPIContainers(), s.Handler.GoRestfulContainer) + routes.Swagger{Config: s.swaggerConfig}.Install(s.Handler.GoRestfulContainer) } if err := s.PrepareOpenAPIService(); err != nil { panic(err) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD new file mode 100644 index 000000000000..ce6df23f97a9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/BUILD @@ -0,0 +1,37 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "config.go", + "types.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/ghodss/yaml:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/identity:go_default_library", + "//vendor/k8s.io/apiserver/pkg/storage/value/encrypt/secretbox:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["encryptionconfig_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + 
"//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/encryptionconfig_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/encryptionconfig_test.go new file mode 100644 index 000000000000..7ffa32fdeca9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/encryptionconfig_test.go @@ -0,0 +1,246 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package encryptionconfig + +import ( + "bytes" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/storage/value" +) + +const ( + sampleText = "abcdefghijklmnopqrstuvwxyz" + + sampleContextText = "0123456789" + + correctConfigWithIdentityFirst = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + - namespaces + providers: + - identity: {} + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - secretbox: + keys: + - name: key1 + secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= +` + + correctConfigWithAesGcmFirst = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - secretbox: + keys: + - name: key1 + secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - identity: {} +` + + correctConfigWithAesCbcFirst = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + providers: + - aescbc: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - identity: {} + - secretbox: + keys: + - name: key1 + secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== +` + + correctConfigWithSecretboxFirst = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + providers: + - secretbox: + keys: + - name: key1 + secret: YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY= + - aescbc: + keys: + - name: key1 + secret: 
c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== + - identity: {} + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: dGhpcyBpcyBwYXNzd29yZA== +` + + incorrectConfigNoSecretForKey = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - namespaces + - secrets + providers: + - aesgcm: + keys: + - name: key1 +` + + incorrectConfigInvalidKey = ` +kind: EncryptionConfig +apiVersion: v1 +resources: + - resources: + - namespaces + - secrets + providers: + - aesgcm: + keys: + - name: key1 + secret: c2VjcmV0IGlzIHNlY3VyZQ== + - name: key2 + secret: YSBzZWNyZXQgYSBzZWNyZXQ= +` +) + +func TestEncryptionProviderConfigCorrect(t *testing.T) { + // Creates two transformers with different ordering of identity and AES transformers. + // Transforms data using one of them, and tries to untransform using both of them. + // Repeats this for both the possible combinations. + + identityFirstTransformerOverrides, err := ParseEncryptionConfiguration(strings.NewReader(correctConfigWithIdentityFirst)) + if err != nil { + t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithIdentityFirst) + } + + aesGcmFirstTransformerOverrides, err := ParseEncryptionConfiguration(strings.NewReader(correctConfigWithAesGcmFirst)) + if err != nil { + t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithAesGcmFirst) + } + + aesCbcFirstTransformerOverrides, err := ParseEncryptionConfiguration(strings.NewReader(correctConfigWithAesCbcFirst)) + if err != nil { + t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithAesCbcFirst) + } + + secretboxFirstTransformerOverrides, err := ParseEncryptionConfiguration(strings.NewReader(correctConfigWithSecretboxFirst)) + if err != nil { + t.Fatalf("error while parsing configuration file: %s.\nThe file was:\n%s", err, correctConfigWithSecretboxFirst) + } + + // 
Pick the transformer for any of the returned resources. + identityFirstTransformer := identityFirstTransformerOverrides[schema.ParseGroupResource("secrets")] + aesGcmFirstTransformer := aesGcmFirstTransformerOverrides[schema.ParseGroupResource("secrets")] + aesCbcFirstTransformer := aesCbcFirstTransformerOverrides[schema.ParseGroupResource("secrets")] + secretboxFirstTransformer := secretboxFirstTransformerOverrides[schema.ParseGroupResource("secrets")] + + context := value.DefaultContext([]byte(sampleContextText)) + originalText := []byte(sampleText) + + transformers := []struct { + Transformer value.Transformer + Name string + }{ + {aesGcmFirstTransformer, "aesGcmFirst"}, + {aesCbcFirstTransformer, "aesCbcFirst"}, + {secretboxFirstTransformer, "secretboxFirst"}, + {identityFirstTransformer, "identityFirst"}, + } + + for _, testCase := range transformers { + transformedData, err := testCase.Transformer.TransformToStorage(originalText, context) + if err != nil { + t.Fatalf("%s: error while transforming data to storage: %s", testCase.Name, err) + } + + for _, transformer := range transformers { + untransformedData, stale, err := transformer.Transformer.TransformFromStorage(transformedData, context) + if err != nil { + t.Fatalf("%s: error while reading using %s transformer: %s", testCase.Name, transformer.Name, err) + } + if stale != (transformer.Name != testCase.Name) { + t.Fatalf("%s: wrong stale information on reading using %s transformer, should be %v", testCase.Name, transformer.Name, testCase.Name == transformer.Name) + } + if bytes.Compare(untransformedData, originalText) != 0 { + t.Fatalf("%s: %s transformer transformed data incorrectly. 
Expected: %v, got %v", testCase.Name, transformer.Name, originalText, untransformedData) + } + } + } + +} + +// Throw error if key has no secret +func TestEncryptionProviderConfigNoSecretForKey(t *testing.T) { + if _, err := ParseEncryptionConfiguration(strings.NewReader(incorrectConfigNoSecretForKey)); err == nil { + t.Fatalf("invalid configuration file (one key has no secret) got parsed:\n%s", incorrectConfigNoSecretForKey) + } +} + +// Throw error if invalid key for AES +func TestEncryptionProviderConfigInvalidKey(t *testing.T) { + if _, err := ParseEncryptionConfiguration(strings.NewReader(incorrectConfigInvalidKey)); err == nil { + t.Fatalf("invalid configuration file (bad AES key) got parsed:\n%s", incorrectConfigInvalidKey) + } +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index 7386637bc976..bdaa0f2af674 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -18,8 +18,6 @@ package options import ( "fmt" - "strconv" - "strings" "github.com/spf13/pflag" @@ -47,8 +45,6 @@ type EtcdOptions struct { EnableWatchCache bool // Set DefaultWatchCacheSize to zero to disable watch caches for those resources that have no explicit cache size set DefaultWatchCacheSize int - // WatchCacheSizes represents override to a given resource - WatchCacheSizes []string } func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { @@ -87,17 +83,10 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "Enables the generic garbage collector. MUST be synced with the corresponding flag "+ "of the kube-controller-manager.") + // TODO: enable cache in integration tests. 
fs.BoolVar(&s.EnableWatchCache, "watch-cache", s.EnableWatchCache, "Enable watch caching in the apiserver") - fs.IntVar(&s.DefaultWatchCacheSize, "default-watch-cache-size", s.DefaultWatchCacheSize, - "Default watch cache size. If zero, watch cache will be disabled for resources that do not have a default watch size set.") - - fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ - "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ - "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled.") - fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, "The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'.") @@ -149,15 +138,7 @@ func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) ResourcePrefix: resource.Group + "/" + resource.Resource, } if f.Options.EnableWatchCache { - sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) - if err != nil { - return generic.RESTOptions{}, err - } - cacheSize, ok := sizes[resource] - if !ok { - cacheSize = f.Options.DefaultWatchCacheSize - } - ret.Decorator = genericregistry.StorageWithCacher(cacheSize) + ret.Decorator = genericregistry.StorageWithCacher(f.Options.DefaultWatchCacheSize) } return ret, nil } @@ -181,52 +162,8 @@ func (f *storageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), } if f.Options.EnableWatchCache { - sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) - if err != nil { - return generic.RESTOptions{}, err - } - cacheSize, ok := sizes[resource] - if !ok { - cacheSize = f.Options.DefaultWatchCacheSize - } - ret.Decorator = genericregistry.StorageWithCacher(cacheSize) + ret.Decorator = genericregistry.StorageWithCacher(f.Options.DefaultWatchCacheSize) } return ret, nil } - -// ParseWatchCacheSizes turns a list of cache size 
values into a map of group resources -// to requested sizes. -func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { - watchCacheSizes := make(map[schema.GroupResource]int) - for _, c := range cacheSizes { - tokens := strings.Split(c, "#") - if len(tokens) != 2 { - return nil, fmt.Errorf("invalid value of watch cache size: %s", c) - } - - size, err := strconv.Atoi(tokens[1]) - if err != nil { - return nil, fmt.Errorf("invalid size of watch cache size: %s", c) - } - if size < 0 { - return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) - } - - watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size - } - return watchCacheSizes, nil -} - -// WriteWatchCacheSizes turns a map of cache size values into a list of string specifications. -func WriteWatchCacheSizes(watchCacheSizes map[schema.GroupResource]int) ([]string, error) { - var cacheSizes []string - - for resource, size := range watchCacheSizes { - if size < 0 { - return nil, fmt.Errorf("watch cache size cannot be negative for resource %s", resource) - } - cacheSizes = append(cacheSizes, fmt.Sprintf("%s#%d", resource.String(), size)) - } - return cacheSizes, nil -} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go index be5d37424409..57ae12e9d10d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -19,6 +19,7 @@ package options import ( "fmt" "net" + "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -39,8 +40,10 @@ type ServerRunOptions struct { ExternalHost string MaxRequestsInFlight int MaxMutatingRequestsInFlight int + RequestTimeout time.Duration MinRequestTimeout int TargetRAMMB int + WatchCacheSizes 
[]string } func NewServerRunOptions() *ServerRunOptions { @@ -48,6 +51,7 @@ func NewServerRunOptions() *ServerRunOptions { return &ServerRunOptions{ MaxRequestsInFlight: defaults.MaxRequestsInFlight, MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight, + RequestTimeout: defaults.RequestTimeout, MinRequestTimeout: defaults.MinRequestTimeout, } } @@ -58,6 +62,7 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error { c.ExternalAddress = s.ExternalHost c.MaxRequestsInFlight = s.MaxRequestsInFlight c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight + c.RequestTimeout = s.RequestTimeout c.MinRequestTimeout = s.MinRequestTimeout c.PublicAddress = s.AdvertiseAddress @@ -121,11 +126,21 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "The maximum number of mutating requests in flight at a given time. When the server exceeds this, "+ "it rejects requests. Zero for no limit.") + fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+ + "An optional field indicating the duration a handler must keep a request open before timing "+ + "it out. This is the default request timeout for requests but may be overridden by flags such as "+ + "--min-request-timeout for specific types of requests.") + fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+ "An optional field indicating the minimum number of seconds a handler must keep "+ "a request open before timing it out. Currently only honored by the watch request "+ "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") + fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ + "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ + "The individual override format: resource#size, where size is a number. 
It takes effect "+ + "when watch-cache is enabled.") + utilfeature.DefaultFeatureGate.AddFlag(fs) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes/swagger.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes/swagger.go index 9218a1e18594..08e342ef5682 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes/swagger.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/routes/swagger.go @@ -30,6 +30,7 @@ type Swagger struct { } // Install adds the SwaggerUI webservice to the given mux. -func (s Swagger) Install(webserviceContainers []*restful.Container, c *restful.Container) { - swagger.RegisterSwaggerService(*s.Config, webserviceContainers, c) +func (s Swagger) Install(c *restful.Container) { + s.Config.WebServices = c.RegisteredWebServices() + swagger.RegisterSwaggerService(*s.Config, c) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index cdbbd28a7455..83fcbd5049c4 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -169,7 +169,6 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } - m.AuthToken = "simple" clusterStr := fmt.Sprintf("%s=http://%s", name, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) @@ -191,7 +190,7 @@ func (m *EtcdTestServer) launch(t *testing.T) error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.s.SyncTicker = time.Tick(500 * time.Millisecond) m.s.Start() m.raftHandler = &testutil.PauseableHandler{Next: 
v2http.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/Godeps/Godeps.json b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/Godeps/Godeps.json index b854e513252a..5ac98b9b6656 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -16,15 +16,19 @@ }, { "ImportPath": "github.com/Azure/go-autorest/autorest", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" + }, + { + "ImportPath": "github.com/Azure/go-autorest/autorest/adal", + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca" + "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" }, { "ImportPath": "github.com/PuerkitoBio/purell", @@ -36,23 +40,23 @@ }, { "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "be73733bb8cc830d0205609b95d125215f8e9c70" + "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" }, { "ImportPath": "github.com/coreos/pkg/health", diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go index 756669001f3e..9651716bd130 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper.go @@ -49,6 +49,7 @@ func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.V for _, group := range groupResources { groupPriority = append(groupPriority, group.Group.Name) + // Make sure the preferred version comes first if len(group.Group.PreferredVersion.Version) != 0 { preferred := group.Group.PreferredVersion.Version if _, ok := group.VersionedResources[preferred]; ok { @@ -72,6 +73,21 @@ func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.V continue } + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, versionInterfaces) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper_test.go index 6bc16ccbf413..40f89a8c210a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/discovery/restmapper_test.go @@ -67,6 +67,32 
@@ func TestRESTMapper(t *testing.T) { }, }, }, + + // This group tests finding and prioritizing resources that only exist in non-preferred versions + { + Group: metav1.APIGroup{ + Name: "unpreferred", + Versions: []metav1.GroupVersionForDiscovery{ + {Version: "v1"}, + {Version: "v2beta1"}, + {Version: "v2alpha1"}, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{Version: "v1"}, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + {Name: "broccoli", Namespaced: true, Kind: "Broccoli"}, + }, + "v2beta1": { + {Name: "broccoli", Namespaced: true, Kind: "Broccoli"}, + {Name: "peas", Namespaced: true, Kind: "Pea"}, + }, + "v2alpha1": { + {Name: "broccoli", Namespaced: true, Kind: "Broccoli"}, + {Name: "peas", Namespaced: true, Kind: "Pea"}, + }, + }, + }, } restMapper := NewRESTMapper(resources, nil) @@ -123,6 +149,16 @@ func TestRESTMapper(t *testing.T) { Kind: "Job", }, }, + { + input: schema.GroupVersionResource{ + Resource: "peas", + }, + want: schema.GroupVersionKind{ + Group: "unpreferred", + Version: "v2beta1", + Kind: "Pea", + }, + }, } for _, tc := range kindTCs { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/README.md b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/README.md index a461e63d73a1..175f295943fd 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/README.md +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/examples/third-party-resources-deprecated/README.md @@ -5,7 +5,7 @@ This particular example demonstrates how to perform basic operations such as: * How to register a new ThirdPartyResource (custom Resource type) -* How to create/get/list instances of your new Resource type (update/delete/etc work as well but are not demonstrated) +* How to create/get/list instances of your new Resource type (update/delete/etc work as well but are not 
demonstrated) * How to setup a controller on Resource handling create/update/delete events ## Running @@ -23,7 +23,7 @@ These act like most other Resources in Kubernetes, and may be `kubectl apply`'d, Some example use cases: * Provisioning/Management of external datastores/databases (eg. CloudSQL/RDS instances) -* Higher level abstractions around Kubernetes primitives (eg. a single Resource to define an etcd cluster, backed by a Service and a ReplicationController) +* Higher level abstractions around Kubernetes primitives (eg. a single Resource to define an etcd cluster, backed by a Service and a ReplicationController) ## Defining types diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/types.go index 5cabed666e63..af2a0c31eb4c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/types.go @@ -615,7 +615,7 @@ type EmptyDirVolumeSource struct { // The default is nil which means that the limit is undefined. // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir // +optional - SizeLimit resource.Quantity + SizeLimit *resource.Quantity } // StorageMedium defines ways that storage can be allocated to a volume. 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/generated.pb.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/generated.pb.go index dcfc99ea0fd1..c5a99f6787ce 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/generated.pb.go @@ -2591,14 +2591,16 @@ func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) i += copy(dAtA[i:], m.Medium) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n31, err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.SizeLimit != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n31, err := m.SizeLimit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } - i += n31 return i, nil } @@ -10033,8 +10035,10 @@ func (m *EmptyDirVolumeSource) Size() (n int) { _ = l l = len(m.Medium) n += 1 + l + sovGenerated(uint64(l)) - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12949,7 +12953,7 @@ func (this *EmptyDirVolumeSource) String() string { } s := strings.Join([]string{`&EmptyDirVolumeSource{`, `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, - `SizeLimit:` + strings.Replace(strings.Replace(this.SizeLimit.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -19966,6 +19970,9 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } + if m.SizeLimit == nil { + m.SizeLimit = 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + } if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -44391,720 +44398,720 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 11429 bytes of a gzipped FileDescriptorProto + // 11430 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x8c, 0x24, 0xc7, 0x75, 0x98, 0x7a, 0x66, 0xf6, 0x63, 0xde, 0x7e, 0xd7, 0xed, 0x1d, 0x97, 0x2b, 0xf2, 0xf6, 0xd8, 0x14, 0xe9, 0x23, 0x79, 0xdc, 0xd3, 0x1d, 0x49, 0x91, 0x12, 0x65, 0x5a, 0xbb, 0x3b, 0xbb, 0x77, 0xeb, 0xfb, 0x1a, 0xd6, 0xec, 0xdd, 0x51, 0x14, 0x43, 0xb2, 0x6f, 0xba, 0x76, 0xb7, 0x79, 0xb3, 0xdd, 0xc3, 0xee, 0x9e, 0xbd, 0x5b, 0x1a, 0x06, 0x6c, 0x45, 0xb0, 0x14, 0x40, 0x49, 0x64, 0x38, - 0x02, 0x02, 0x27, 0x80, 0x02, 0x03, 0x71, 0x94, 0x6f, 0x2b, 0x82, 0x3e, 0x0c, 0xcb, 0x09, 0xe2, - 0x48, 0x8e, 0x1c, 0x24, 0x8e, 0x00, 0x23, 0xb1, 0x02, 0xc3, 0x6b, 0x6b, 0x85, 0xf8, 0x4f, 0x80, - 0xfc, 0x48, 0xfe, 0x6d, 0x3e, 0x10, 0xd4, 0x67, 0x57, 0xf5, 0xf4, 0x6c, 0xf7, 0x2c, 0x6f, 0xd7, - 0x94, 0x90, 0x7f, 0x33, 0xf5, 0x5e, 0xbd, 0xaa, 0xae, 0x8f, 0x57, 0xef, 0xbd, 0x7a, 0xef, 0x15, - 0x9c, 0xbb, 0xfb, 0x52, 0x34, 0xef, 0x05, 0xe7, 0xef, 0x76, 0xee, 0x90, 0xd0, 0x27, 0x31, 0x89, - 0xce, 0xb7, 0xef, 0x6e, 0x9c, 0x77, 0xda, 0xde, 0xf9, 0xed, 0x0b, 0xe7, 0x37, 0x88, 0x4f, 0x42, - 0x27, 0x26, 0xee, 0x7c, 0x3b, 0x0c, 0xe2, 0x00, 0x3d, 0xc2, 0xb1, 0xe7, 0x13, 0xec, 0xf9, 0xf6, - 0xdd, 0x8d, 0x79, 0xa7, 0xed, 0xcd, 0x6f, 0x5f, 0x98, 0x7d, 0x76, 0xc3, 0x8b, 0x37, 0x3b, 0x77, - 0xe6, 0x9b, 0xc1, 0xd6, 0xf9, 0x8d, 0x60, 0x23, 0x38, 0xcf, 0x2a, 0xdd, 0xe9, 0xac, 0xb3, 0x7f, - 0xec, 0x0f, 0xfb, 0xc5, 0x89, 0xcd, 0x3e, 0x2f, 0x9a, 0x76, 0xda, 0xde, 0x96, 0xd3, 0xdc, 0xf4, - 0x7c, 0x12, 0xee, 0xa8, 0xc6, 0x43, 0x12, 0x05, 0x9d, 0xb0, 0x49, 0xd2, 0x5d, 0x38, 0xb0, 0x56, - 0x74, 0x7e, 0x8b, 0xc4, 0x4e, 0x46, 0xc7, 0x67, 0xcf, 0xf7, 0xaa, 0x15, 0x76, 0xfc, 0xd8, 0xdb, - 0xea, 0x6e, 0xe6, 0x63, 
0x79, 0x15, 0xa2, 0xe6, 0x26, 0xd9, 0x72, 0xba, 0xea, 0x3d, 0xd7, 0xab, - 0x5e, 0x27, 0xf6, 0x5a, 0xe7, 0x3d, 0x3f, 0x8e, 0xe2, 0x30, 0x5d, 0xc9, 0xfe, 0x63, 0x0b, 0xce, - 0x2c, 0xdc, 0x6e, 0x2c, 0xb7, 0x9c, 0x28, 0xf6, 0x9a, 0x8b, 0xad, 0xa0, 0x79, 0xb7, 0x11, 0x07, - 0x21, 0xb9, 0x15, 0xb4, 0x3a, 0x5b, 0xa4, 0xc1, 0x06, 0x02, 0x9d, 0x83, 0xe1, 0x6d, 0xf6, 0x7f, - 0xb5, 0x36, 0x63, 0x9d, 0xb1, 0xce, 0x56, 0x17, 0x27, 0xbf, 0xbf, 0x3b, 0xf7, 0xa1, 0xbd, 0xdd, - 0xb9, 0xe1, 0x5b, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x24, 0x0c, 0xae, 0x47, 0x6b, 0x3b, 0x6d, 0x32, - 0x53, 0x62, 0xb8, 0xe3, 0x02, 0x77, 0x70, 0xa5, 0x41, 0x4b, 0xb1, 0x80, 0xa2, 0xf3, 0x50, 0x6d, - 0x3b, 0x61, 0xec, 0xc5, 0x5e, 0xe0, 0xcf, 0x94, 0xcf, 0x58, 0x67, 0x07, 0x16, 0xa7, 0x04, 0x6a, - 0xb5, 0x2e, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xe3, 0xde, 0xf0, 0x5b, 0x3b, 0x33, 0x95, - 0x33, 0xd6, 0xd9, 0xe1, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0x7f, 0xbb, 0x04, 0xc3, 0x0b, - 0xeb, 0xeb, 0x9e, 0xef, 0xc5, 0x3b, 0xe8, 0x6d, 0x18, 0xf5, 0x03, 0x97, 0xc8, 0xff, 0xec, 0x2b, - 0x46, 0x2e, 0x3e, 0x3d, 0x7f, 0xd0, 0xa2, 0x9a, 0xbf, 0xae, 0xd5, 0x58, 0x9c, 0xdc, 0xdb, 0x9d, - 0x1b, 0xd5, 0x4b, 0xb0, 0x41, 0x11, 0xbd, 0x01, 0x23, 0xed, 0xc0, 0x55, 0x0d, 0x94, 0x58, 0x03, - 0x4f, 0x1d, 0xdc, 0x40, 0x3d, 0xa9, 0xb0, 0x38, 0xb1, 0xb7, 0x3b, 0x37, 0xa2, 0x15, 0x60, 0x9d, - 0x1c, 0x6a, 0xc1, 0x04, 0xfd, 0xeb, 0xc7, 0x9e, 0x6a, 0xa1, 0xcc, 0x5a, 0x78, 0x36, 0xbf, 0x05, - 0xad, 0xd2, 0xe2, 0x89, 0xbd, 0xdd, 0xb9, 0x89, 0x54, 0x21, 0x4e, 0x93, 0xb6, 0xdf, 0x83, 0xf1, - 0x85, 0x38, 0x76, 0x9a, 0x9b, 0xc4, 0xe5, 0xf3, 0x8b, 0x9e, 0x87, 0x8a, 0xef, 0x6c, 0x11, 0x31, - 0xfb, 0x67, 0xc4, 0xb0, 0x57, 0xae, 0x3b, 0x5b, 0x64, 0x7f, 0x77, 0x6e, 0xf2, 0xa6, 0xef, 0xbd, - 0xdb, 0x11, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x02, 0xb8, 0x64, 0xdb, 0x6b, 0x92, 0xba, - 0x13, 0x6f, 0x8a, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x29, 0x08, 0xd6, 0xb0, 0xec, 0xcf, 0x5a, 0x50, - 0x5d, 0xd8, 0x0e, 0x3c, 0xb7, 0x1e, 0xb8, 0x11, 0xea, 0xc0, 
0x44, 0x3b, 0x24, 0xeb, 0x24, 0x54, - 0x45, 0x33, 0xd6, 0x99, 0xf2, 0xd9, 0x91, 0x8b, 0x17, 0x73, 0xbe, 0xdb, 0xac, 0xb4, 0xec, 0xc7, - 0xe1, 0xce, 0xe2, 0x43, 0xa2, 0xe9, 0x89, 0x14, 0x14, 0xa7, 0xdb, 0xb0, 0xbf, 0x5b, 0x82, 0x93, - 0x0b, 0xef, 0x75, 0x42, 0x52, 0xf3, 0xa2, 0xbb, 0xe9, 0xad, 0xe0, 0x7a, 0xd1, 0xdd, 0xeb, 0xc9, - 0x60, 0xa8, 0x35, 0x58, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x88, 0xfe, 0xbe, 0x89, 0x57, - 0xc5, 0xd7, 0x9f, 0x10, 0xc8, 0x23, 0x35, 0x27, 0x76, 0x6a, 0x1c, 0x84, 0x25, 0x0e, 0xba, 0x06, - 0x23, 0x4d, 0xb6, 0x73, 0x37, 0xae, 0x05, 0x2e, 0x61, 0x33, 0x5c, 0x5d, 0x7c, 0x86, 0xa2, 0x2f, - 0x25, 0xc5, 0xfb, 0xbb, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, 0xab, - 0x8d, 0x58, 0x61, 0x94, 0x20, 0x63, 0x13, 0x9e, 0xd5, 0xf6, 0xd4, 0x00, 0xdb, 0x53, 0xa3, 0xd9, - 0xfb, 0x09, 0x5d, 0x80, 0xca, 0x5d, 0xcf, 0x77, 0x67, 0x06, 0x19, 0xad, 0x47, 0xe9, 0xf4, 0x5f, - 0xf1, 0x7c, 0x77, 0x7f, 0x77, 0x6e, 0xca, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x47, 0x96, - 0x18, 0xc6, 0x15, 0xaf, 0x65, 0x72, 0x94, 0x8b, 0x00, 0x11, 0x69, 0x86, 0x24, 0xd6, 0x06, 0x52, - 0xad, 0x8c, 0x86, 0x82, 0x60, 0x0d, 0x8b, 0xf2, 0x8b, 0x68, 0xd3, 0x09, 0xd9, 0x02, 0x13, 0xc3, - 0xa9, 0xf8, 0x45, 0x43, 0x02, 0x70, 0x82, 0x63, 0xf0, 0x8b, 0x72, 0x2e, 0xbf, 0xf8, 0x3d, 0x0b, - 0x86, 0x16, 0x3d, 0xdf, 0xf5, 0xfc, 0x0d, 0xf4, 0x36, 0x0c, 0x53, 0x76, 0xee, 0x3a, 0xb1, 0x23, - 0x58, 0xc5, 0x47, 0xe5, 0x7a, 0xd3, 0xb9, 0xab, 0x5c, 0x71, 0xd1, 0x3c, 0xc5, 0xa6, 0xeb, 0xee, - 0xc6, 0x9d, 0x77, 0x48, 0x33, 0xbe, 0x46, 0x62, 0x27, 0xf9, 0x9c, 0xa4, 0x0c, 0x2b, 0xaa, 0xe8, - 0x26, 0x0c, 0xc6, 0x4e, 0xb8, 0x41, 0x62, 0xc1, 0x29, 0x72, 0xf6, 0x31, 0xa7, 0x81, 0xe9, 0x2a, - 0x25, 0x7e, 0x93, 0x24, 0x3c, 0x75, 0x8d, 0x11, 0xc1, 0x82, 0x98, 0xdd, 0x84, 0xd1, 0x25, 0xa7, - 0xed, 0xdc, 0xf1, 0x5a, 0x5e, 0xec, 0x91, 0x08, 0xfd, 0x0c, 0x94, 0x1d, 0xd7, 0x65, 0x7b, 0xa6, - 0xba, 0x78, 0x72, 0x6f, 0x77, 0xae, 0xbc, 0xe0, 0xd2, 0x29, 0x03, 0x85, 0xb5, 0x83, 0x29, 0x06, - 
0x7a, 0x1a, 0x2a, 0x6e, 0x18, 0xb4, 0x67, 0x4a, 0x0c, 0xf3, 0x14, 0x9d, 0xdd, 0x5a, 0x18, 0xb4, - 0x53, 0xa8, 0x0c, 0xc7, 0xfe, 0x5e, 0x09, 0xd0, 0x12, 0x69, 0x6f, 0xae, 0x34, 0x8c, 0x39, 0x3d, - 0x0b, 0xc3, 0x5b, 0x81, 0xef, 0xc5, 0x41, 0x18, 0x89, 0x06, 0xd9, 0x52, 0xba, 0x26, 0xca, 0xb0, - 0x82, 0xa2, 0x33, 0x50, 0x69, 0x27, 0x1c, 0x61, 0x54, 0x72, 0x13, 0xc6, 0x0b, 0x18, 0x84, 0x62, - 0x74, 0x22, 0x12, 0x8a, 0x2d, 0xa0, 0x30, 0x6e, 0x46, 0x24, 0xc4, 0x0c, 0x92, 0xac, 0x20, 0xba, - 0xb6, 0xc4, 0x02, 0x4f, 0xad, 0x20, 0x0a, 0xc1, 0x1a, 0x16, 0x7a, 0x0b, 0xaa, 0xfc, 0x1f, 0x26, - 0xeb, 0x6c, 0xb5, 0xe7, 0xf2, 0x91, 0xab, 0x41, 0xd3, 0x69, 0xa5, 0x07, 0x7f, 0x8c, 0xad, 0x38, - 0x49, 0x08, 0x27, 0x34, 0x8d, 0x15, 0x37, 0x98, 0xbb, 0xe2, 0xfe, 0xb6, 0x05, 0x68, 0xc9, 0xf3, - 0x5d, 0x12, 0x1e, 0xc3, 0x69, 0xdb, 0xdf, 0x66, 0xf8, 0x13, 0xda, 0xb5, 0x60, 0xab, 0x1d, 0xf8, - 0xc4, 0x8f, 0x97, 0x02, 0xdf, 0xe5, 0x27, 0xf0, 0x27, 0xa0, 0x12, 0xd3, 0xa6, 0x78, 0xb7, 0x9e, - 0x94, 0xd3, 0x42, 0x1b, 0xd8, 0xdf, 0x9d, 0x3b, 0xd5, 0x5d, 0x83, 0x75, 0x81, 0xd5, 0x41, 0x1f, - 0x87, 0xc1, 0x28, 0x76, 0xe2, 0x4e, 0x24, 0x3a, 0xfa, 0x98, 0xec, 0x68, 0x83, 0x95, 0xee, 0xef, - 0xce, 0x4d, 0xa8, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x29, 0x18, 0xda, 0x22, 0x51, 0xe4, 0x6c, - 0x48, 0x9e, 0x38, 0x21, 0xea, 0x0e, 0x5d, 0xe3, 0xc5, 0x58, 0xc2, 0xd1, 0xe3, 0x30, 0x40, 0xc2, - 0x30, 0x08, 0xc5, 0x8a, 0x18, 0x13, 0x88, 0x03, 0xcb, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x5f, 0x2c, - 0x98, 0x50, 0x7d, 0xe5, 0x6d, 0x1d, 0xc3, 0x96, 0x77, 0x01, 0x9a, 0xf2, 0x03, 0x23, 0xb6, 0xd1, - 0xb4, 0x36, 0xb2, 0x97, 0x5f, 0xf7, 0x80, 0x26, 0x6d, 0xa8, 0xa2, 0x08, 0x6b, 0x74, 0xed, 0x7f, - 0x6b, 0xc1, 0x89, 0xd4, 0xb7, 0x5d, 0xf5, 0xa2, 0x18, 0xbd, 0xd1, 0xf5, 0x7d, 0xf3, 0xc5, 0xbe, - 0x8f, 0xd6, 0x66, 0x5f, 0xa7, 0xd6, 0x8b, 0x2c, 0xd1, 0xbe, 0x0d, 0xc3, 0x80, 0x17, 0x93, 0x2d, - 0xf9, 0x59, 0xcf, 0x16, 0xfc, 0x2c, 0xde, 0xbf, 0x64, 0x96, 0x56, 0x29, 0x0d, 0xcc, 0x49, 0xd9, - 0xff, 0xcb, 0x82, 0xea, 0x52, 0xe0, 
0xaf, 0x7b, 0x1b, 0xd7, 0x9c, 0xf6, 0x31, 0xcc, 0x4f, 0x03, - 0x2a, 0x8c, 0x3a, 0xff, 0x84, 0x0b, 0x79, 0x9f, 0x20, 0x3a, 0x36, 0x4f, 0xcf, 0x3d, 0x2e, 0x5f, - 0x28, 0x36, 0x45, 0x8b, 0x30, 0x23, 0x36, 0xfb, 0x22, 0x54, 0x15, 0x02, 0x9a, 0x84, 0xf2, 0x5d, - 0xc2, 0x85, 0xcf, 0x2a, 0xa6, 0x3f, 0xd1, 0x34, 0x0c, 0x6c, 0x3b, 0xad, 0x8e, 0xd8, 0xbc, 0x98, - 0xff, 0xf9, 0x44, 0xe9, 0x25, 0xcb, 0xfe, 0x1e, 0xdb, 0x81, 0xa2, 0x91, 0x65, 0x7f, 0x5b, 0x30, - 0x87, 0xcf, 0x59, 0x30, 0xdd, 0xca, 0x60, 0x4a, 0x62, 0x4c, 0x0e, 0xc3, 0xce, 0x1e, 0x11, 0xdd, - 0x9e, 0xce, 0x82, 0xe2, 0xcc, 0xd6, 0x28, 0xaf, 0x0f, 0xda, 0x74, 0xc1, 0x39, 0x2d, 0xd6, 0x75, - 0x21, 0x36, 0xdc, 0x10, 0x65, 0x58, 0x41, 0xed, 0xbf, 0xb0, 0x60, 0x5a, 0x7d, 0xc7, 0x15, 0xb2, - 0xd3, 0x20, 0x2d, 0xd2, 0x8c, 0x83, 0xf0, 0x83, 0xf2, 0x25, 0x8f, 0xf2, 0x39, 0xe1, 0x3c, 0x69, - 0x44, 0x10, 0x28, 0x5f, 0x21, 0x3b, 0x7c, 0x82, 0xf4, 0x0f, 0x2d, 0x1f, 0xf8, 0xa1, 0xbf, 0x63, - 0xc1, 0x98, 0xfa, 0xd0, 0x63, 0xd8, 0x72, 0x57, 0xcd, 0x2d, 0xf7, 0x33, 0x05, 0xd7, 0x6b, 0x8f, - 0xcd, 0xf6, 0xb7, 0x4a, 0x94, 0x6d, 0x08, 0x9c, 0x7a, 0x18, 0xd0, 0x41, 0xa2, 0x1c, 0xff, 0x03, - 0x32, 0x4b, 0xfd, 0x7d, 0xec, 0x15, 0xb2, 0xb3, 0x16, 0x50, 0x69, 0x22, 0xfb, 0x63, 0x8d, 0x49, - 0xad, 0x1c, 0x38, 0xa9, 0x7f, 0x50, 0x82, 0x93, 0x6a, 0x58, 0x8c, 0x53, 0xfa, 0xa7, 0x72, 0x60, - 0x2e, 0xc0, 0x88, 0x4b, 0xd6, 0x9d, 0x4e, 0x2b, 0x56, 0x0a, 0xc8, 0x00, 0xd7, 0x4c, 0x6b, 0x49, - 0x31, 0xd6, 0x71, 0xfa, 0x18, 0xcb, 0xaf, 0x8c, 0x30, 0x7e, 0x1e, 0x3b, 0x74, 0xd5, 0x53, 0x09, - 0x4f, 0xd3, 0x28, 0x47, 0x75, 0x8d, 0x52, 0x68, 0x8f, 0x8f, 0xc3, 0x80, 0xb7, 0x45, 0xcf, 0xfc, - 0x92, 0x79, 0x94, 0xaf, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x09, 0x18, 0x6a, 0x06, 0x5b, 0x5b, 0x8e, - 0xef, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x42, 0xc5, 0x82, 0x25, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x02, - 0x15, 0x27, 0xdc, 0x88, 0x66, 0x2a, 0x0c, 0x67, 0x98, 0xb6, 0xb4, 0x10, 0x6e, 0x44, 0x98, 0x95, - 0x52, 0x59, 0xf2, 0x5e, 0x10, 0xde, 0xf5, 0xfc, 0x8d, 0x9a, 0x17, 0x32, 
0xc1, 0x50, 0x93, 0x25, - 0x6f, 0x2b, 0x08, 0xd6, 0xb0, 0x50, 0x1d, 0x06, 0xda, 0x41, 0x18, 0x47, 0x33, 0x83, 0x6c, 0xe0, - 0x9f, 0xc9, 0xdd, 0x7e, 0xfc, 0xbb, 0xeb, 0x41, 0x18, 0x27, 0x9f, 0x42, 0xff, 0x45, 0x98, 0x13, - 0x42, 0x4b, 0x50, 0x26, 0xfe, 0xf6, 0xcc, 0x10, 0xa3, 0xf7, 0x91, 0x83, 0xe9, 0x2d, 0xfb, 0xdb, - 0xb7, 0x9c, 0x30, 0xe1, 0x57, 0xcb, 0xfe, 0x36, 0xa6, 0xb5, 0x51, 0x13, 0xaa, 0xd2, 0x7e, 0x15, - 0xcd, 0x0c, 0x17, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x6e, 0xc7, 0x0b, 0xc9, 0x16, 0xf1, 0xe3, - 0x28, 0x51, 0xac, 0x24, 0x34, 0xc2, 0x09, 0x5d, 0xd4, 0x84, 0x51, 0x2e, 0x7f, 0x5e, 0x0b, 0x3a, - 0x7e, 0x1c, 0xcd, 0x54, 0x59, 0x97, 0x73, 0x8c, 0x1d, 0xb7, 0x92, 0x1a, 0x8b, 0xd3, 0x82, 0xfc, - 0xa8, 0x56, 0x18, 0x61, 0x83, 0x28, 0x7a, 0x03, 0xc6, 0x5a, 0xde, 0x36, 0xf1, 0x49, 0x14, 0xd5, - 0xc3, 0xe0, 0x0e, 0x99, 0x01, 0xf6, 0x35, 0x8f, 0xe7, 0x29, 0xfe, 0xc1, 0x1d, 0xb2, 0x38, 0xb5, - 0xb7, 0x3b, 0x37, 0x76, 0x55, 0xaf, 0x8d, 0x4d, 0x62, 0xe8, 0x2d, 0x18, 0xa7, 0xc2, 0xae, 0x97, - 0x90, 0x1f, 0x29, 0x4e, 0x1e, 0xed, 0xed, 0xce, 0x8d, 0x63, 0xa3, 0x3a, 0x4e, 0x91, 0x43, 0x6b, - 0x50, 0x6d, 0x79, 0xeb, 0xa4, 0xb9, 0xd3, 0x6c, 0x91, 0x99, 0x51, 0x46, 0x3b, 0x67, 0x73, 0x5e, - 0x95, 0xe8, 0x5c, 0xc1, 0x50, 0x7f, 0x71, 0x42, 0x08, 0xdd, 0x82, 0x53, 0x31, 0x09, 0xb7, 0x3c, - 0xdf, 0xa1, 0x9b, 0x4a, 0x48, 0xbf, 0xcc, 0xba, 0x32, 0xc6, 0x56, 0xed, 0x69, 0x31, 0xb0, 0xa7, - 0xd6, 0x32, 0xb1, 0x70, 0x8f, 0xda, 0xe8, 0x06, 0x4c, 0xb0, 0xfd, 0x54, 0xef, 0xb4, 0x5a, 0xf5, - 0xa0, 0xe5, 0x35, 0x77, 0x66, 0xc6, 0x19, 0xc1, 0x27, 0xa4, 0xcd, 0x64, 0xd5, 0x04, 0x53, 0xc5, - 0x30, 0xf9, 0x87, 0xd3, 0xb5, 0x51, 0x0b, 0x26, 0x22, 0xd2, 0xec, 0x84, 0x5e, 0xbc, 0x43, 0xd7, - 0x3e, 0xb9, 0x1f, 0xcf, 0x4c, 0x14, 0x51, 0x74, 0x1b, 0x66, 0x25, 0x6e, 0xb0, 0x4a, 0x15, 0xe2, - 0x34, 0x69, 0xca, 0x2a, 0xa2, 0xd8, 0xf5, 0xfc, 0x99, 0x49, 0xc6, 0x81, 0xd4, 0xfe, 0x6a, 0xd0, - 0x42, 0xcc, 0x61, 0xcc, 0x7e, 0x40, 0x7f, 0xdc, 0xa0, 0x5c, 0x7a, 0x8a, 0x21, 0x26, 0xf6, 0x03, - 0x09, 0xc0, 
0x09, 0x0e, 0x15, 0x0d, 0xe2, 0x78, 0x67, 0x06, 0x31, 0x54, 0xb5, 0xd5, 0xd6, 0xd6, - 0x3e, 0x8d, 0x69, 0x39, 0xba, 0x05, 0x43, 0xc4, 0xdf, 0x5e, 0x09, 0x83, 0xad, 0x99, 0x13, 0x45, - 0x78, 0xc0, 0x32, 0x47, 0xe6, 0xe7, 0x47, 0xa2, 0xc2, 0x88, 0x62, 0x2c, 0x89, 0xa1, 0xfb, 0x30, - 0x93, 0x31, 0x4b, 0x7c, 0x52, 0xa6, 0xd9, 0xa4, 0x7c, 0x52, 0xd4, 0x9d, 0x59, 0xeb, 0x81, 0xb7, - 0x7f, 0x00, 0x0c, 0xf7, 0xa4, 0x6e, 0xdf, 0x81, 0x71, 0xc5, 0xa8, 0xd8, 0x7c, 0xa3, 0x39, 0x18, - 0xa0, 0xbc, 0x58, 0x2a, 0xf4, 0x55, 0x3a, 0xa8, 0x94, 0x45, 0x47, 0x98, 0x97, 0xb3, 0x41, 0xf5, - 0xde, 0x23, 0x8b, 0x3b, 0x31, 0xe1, 0x8a, 0x5d, 0x59, 0x1b, 0x54, 0x09, 0xc0, 0x09, 0x8e, 0xfd, - 0x7f, 0xb9, 0x98, 0x94, 0x70, 0xc3, 0x02, 0x27, 0xc1, 0x39, 0x18, 0xde, 0x0c, 0xa2, 0x98, 0x62, - 0xb3, 0x36, 0x06, 0x12, 0xc1, 0xe8, 0xb2, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0c, 0x63, 0x4d, 0xbd, - 0x01, 0x71, 0x8c, 0x9d, 0x14, 0x55, 0xcc, 0xd6, 0xb1, 0x89, 0x8b, 0x5e, 0x82, 0x61, 0x66, 0x18, - 0x6f, 0x06, 0x2d, 0xa1, 0x42, 0xca, 0x53, 0x79, 0xb8, 0x2e, 0xca, 0xf7, 0xb5, 0xdf, 0x58, 0x61, - 0x53, 0x45, 0x9c, 0x76, 0x61, 0xb5, 0x2e, 0x0e, 0x10, 0xa5, 0x88, 0x5f, 0x66, 0xa5, 0x58, 0x40, - 0xed, 0xdf, 0x2a, 0x69, 0xa3, 0x4c, 0x15, 0x20, 0x82, 0x5e, 0x87, 0xa1, 0x7b, 0x8e, 0x17, 0x7b, - 0xfe, 0x86, 0x90, 0x1e, 0x9e, 0x2b, 0x78, 0x9a, 0xb0, 0xea, 0xb7, 0x79, 0x55, 0x7e, 0xf2, 0x89, - 0x3f, 0x58, 0x12, 0xa4, 0xb4, 0xc3, 0x8e, 0xef, 0x53, 0xda, 0xa5, 0xfe, 0x69, 0x63, 0x5e, 0x95, - 0xd3, 0x16, 0x7f, 0xb0, 0x24, 0x88, 0xd6, 0x01, 0xe4, 0x5a, 0x22, 0xae, 0x30, 0x48, 0x7f, 0xac, - 0x1f, 0xf2, 0x6b, 0xaa, 0xf6, 0xe2, 0x38, 0x3d, 0x6b, 0x93, 0xff, 0x58, 0xa3, 0x6c, 0xc7, 0x4c, - 0x08, 0xeb, 0xee, 0x16, 0xfa, 0x0c, 0xdd, 0xd2, 0x4e, 0x18, 0x13, 0x77, 0x21, 0x4e, 0xdb, 0xf4, - 0x0f, 0x16, 0xb1, 0xd7, 0xbc, 0x2d, 0xa2, 0x6f, 0x7f, 0x41, 0x04, 0x27, 0xf4, 0xec, 0x6f, 0x95, - 0x61, 0xa6, 0x57, 0x77, 0xe9, 0x92, 0x24, 0xf7, 0xbd, 0x78, 0x89, 0x8a, 0x49, 0x96, 0xb9, 0x24, - 0x97, 0x45, 0x39, 0x56, 0x18, 0x74, 0x6d, 0x44, 
0xde, 0x86, 0x54, 0x96, 0x06, 0x92, 0xb5, 0xd1, - 0x60, 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x48, 0x9c, 0x48, 0xdc, 0x87, 0x68, 0x6b, 0x08, 0xb3, 0x52, - 0x2c, 0xa0, 0xba, 0x41, 0xa4, 0x92, 0x63, 0x10, 0x31, 0x86, 0x68, 0xe0, 0xc1, 0x0e, 0x11, 0x7a, - 0x13, 0x60, 0xdd, 0xf3, 0xbd, 0x68, 0x93, 0x51, 0x1f, 0xec, 0x9b, 0xba, 0x12, 0xb2, 0x56, 0x14, - 0x15, 0xac, 0x51, 0x44, 0x2f, 0xc0, 0x88, 0xda, 0x9e, 0xab, 0xb5, 0x99, 0x21, 0xd3, 0x86, 0x9e, - 0xf0, 0xaa, 0x1a, 0xd6, 0xf1, 0xec, 0x77, 0xd2, 0xeb, 0x45, 0xec, 0x0a, 0x6d, 0x7c, 0xad, 0xa2, - 0xe3, 0x5b, 0x3a, 0x78, 0x7c, 0xed, 0xff, 0x5c, 0x86, 0x09, 0xa3, 0xb1, 0x4e, 0x54, 0x80, 0xa3, - 0xbd, 0x4a, 0x0f, 0x2c, 0x27, 0x26, 0x62, 0x4f, 0x9e, 0xeb, 0x67, 0xd3, 0xe8, 0xc7, 0x1b, 0xdd, - 0x0b, 0x9c, 0x12, 0xda, 0x84, 0x6a, 0xcb, 0x89, 0x98, 0x49, 0x85, 0x88, 0xbd, 0xd8, 0x1f, 0xd9, - 0x44, 0xfd, 0x70, 0xa2, 0x58, 0x3b, 0x3d, 0x78, 0x2b, 0x09, 0x71, 0x7a, 0xda, 0x52, 0x61, 0x47, - 0x5e, 0xc2, 0xa9, 0xee, 0x50, 0x89, 0x68, 0x07, 0x73, 0x18, 0x7a, 0x09, 0x46, 0x43, 0xc2, 0x56, - 0xca, 0x12, 0x95, 0xe7, 0xd8, 0xd2, 0x1b, 0x48, 0x04, 0x3f, 0xac, 0xc1, 0xb0, 0x81, 0x99, 0xc8, - 0xfd, 0x83, 0x07, 0xc8, 0xfd, 0x4f, 0xc1, 0x10, 0xfb, 0xa1, 0x56, 0x85, 0x9a, 0xa1, 0x55, 0x5e, - 0x8c, 0x25, 0x3c, 0xbd, 0x88, 0x86, 0x0b, 0x2e, 0xa2, 0xa7, 0x61, 0xbc, 0xe6, 0x90, 0xad, 0xc0, - 0x5f, 0xf6, 0xdd, 0x76, 0xe0, 0xf9, 0x31, 0x9a, 0x81, 0x0a, 0x3b, 0x4f, 0xf8, 0x7e, 0xaf, 0x50, - 0x0a, 0xb8, 0x42, 0x65, 0x77, 0xfb, 0x4f, 0x4a, 0x30, 0x56, 0x23, 0x2d, 0x12, 0x13, 0xae, 0xf7, - 0x44, 0x68, 0x05, 0xd0, 0x46, 0xe8, 0x34, 0x49, 0x9d, 0x84, 0x5e, 0xe0, 0x36, 0x48, 0x33, 0xf0, - 0xd9, 0xdd, 0x15, 0x3d, 0x20, 0x4f, 0xed, 0xed, 0xce, 0xa1, 0x4b, 0x5d, 0x50, 0x9c, 0x51, 0x03, - 0xb9, 0x30, 0xd6, 0x0e, 0x89, 0x61, 0x37, 0xb4, 0xf2, 0x45, 0x8d, 0xba, 0x5e, 0x85, 0x4b, 0xc3, - 0x46, 0x11, 0x36, 0x89, 0xa2, 0x4f, 0xc1, 0x64, 0x10, 0xb6, 0x37, 0x1d, 0xbf, 0x46, 0xda, 0xc4, - 0x77, 0xa9, 0x0a, 0x20, 0xac, 0x1d, 0xd3, 0x7b, 0xbb, 0x73, 0x93, 0x37, 0x52, 0x30, 
0xdc, 0x85, - 0x8d, 0x5e, 0x87, 0xa9, 0x76, 0x18, 0xb4, 0x9d, 0x0d, 0xb6, 0x64, 0x84, 0xb4, 0xc2, 0x79, 0xd3, - 0xb9, 0xbd, 0xdd, 0xb9, 0xa9, 0x7a, 0x1a, 0xb8, 0xbf, 0x3b, 0x77, 0x82, 0x0d, 0x19, 0x2d, 0x49, - 0x80, 0xb8, 0x9b, 0x8c, 0xfd, 0x2e, 0x9c, 0xac, 0x05, 0xf7, 0xfc, 0x7b, 0x4e, 0xe8, 0x2e, 0xd4, - 0x57, 0x35, 0xe3, 0xc4, 0x6b, 0x52, 0xf9, 0xe5, 0x77, 0x82, 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, - 0x1d, 0x2b, 0x5e, 0x8b, 0xf4, 0x30, 0x87, 0xfc, 0xe3, 0x92, 0xd1, 0x66, 0x82, 0xaf, 0xee, 0x2e, - 0xac, 0x9e, 0x77, 0x17, 0x9f, 0x81, 0xe1, 0x75, 0x8f, 0xb4, 0x5c, 0x4c, 0xd6, 0xc5, 0x6c, 0x5d, - 0x28, 0x72, 0xb9, 0xb3, 0x42, 0xeb, 0x48, 0xeb, 0x18, 0x57, 0xa2, 0x57, 0x04, 0x19, 0xac, 0x08, - 0xa2, 0x0e, 0x4c, 0x4a, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, 0xae, 0x98, 0x9a, 0x67, 0x36, 0xc3, - 0xa6, 0x17, 0xa7, 0x08, 0xe2, 0xae, 0x26, 0xa8, 0xfe, 0xbc, 0x45, 0x8f, 0xba, 0x0a, 0x5b, 0xfa, - 0x4c, 0x7f, 0x66, 0xa6, 0x00, 0x56, 0x6a, 0xff, 0xa6, 0x05, 0x0f, 0x75, 0x8d, 0x96, 0xb0, 0x93, - 0x1c, 0xd9, 0x1c, 0xa5, 0x8d, 0x15, 0xa5, 0x7c, 0x63, 0x85, 0xfd, 0x5b, 0x16, 0x4c, 0x2f, 0x6f, - 0xb5, 0xe3, 0x9d, 0x9a, 0x67, 0xde, 0xb9, 0xbc, 0x08, 0x83, 0x5b, 0xc4, 0xf5, 0x3a, 0x5b, 0x62, - 0x5e, 0xe7, 0xe4, 0xc1, 0x70, 0x8d, 0x95, 0xee, 0xef, 0xce, 0x8d, 0x35, 0xe2, 0x20, 0x74, 0x36, - 0x08, 0x2f, 0xc0, 0x02, 0x9d, 0x5d, 0x29, 0x79, 0xef, 0x91, 0xab, 0xde, 0x96, 0x27, 0xaf, 0xf2, - 0x0e, 0x34, 0xf2, 0xcd, 0xcb, 0xa1, 0x9d, 0x7f, 0xb5, 0xe3, 0xf8, 0xb1, 0x17, 0xef, 0x98, 0xf2, - 0x32, 0x23, 0x84, 0x13, 0x9a, 0xf6, 0x8f, 0x2c, 0x98, 0x90, 0x1c, 0x68, 0xc1, 0x75, 0x43, 0x12, - 0x45, 0x68, 0x16, 0x4a, 0x5e, 0x5b, 0xf4, 0x14, 0x44, 0xed, 0xd2, 0x6a, 0x1d, 0x97, 0xbc, 0x36, - 0x7a, 0x1d, 0xaa, 0xfc, 0x2e, 0x30, 0x59, 0x7e, 0x7d, 0xde, 0x2d, 0x32, 0xed, 0x73, 0x4d, 0xd2, - 0xc0, 0x09, 0x39, 0x29, 0x87, 0xb3, 0xb3, 0xad, 0x6c, 0xde, 0x4c, 0x5d, 0x16, 0xe5, 0x58, 0x61, - 0xa0, 0xb3, 0x30, 0xec, 0x07, 0x2e, 0xbf, 0xae, 0xe5, 0x9c, 0x80, 0x2d, 0xea, 0xeb, 0xa2, 0x0c, - 0x2b, 0xa8, 0xfd, 0x45, 
0x0b, 0x46, 0xe5, 0x37, 0x16, 0x54, 0x09, 0xe8, 0x36, 0x4c, 0xd4, 0x81, - 0x64, 0x1b, 0x52, 0x91, 0x9e, 0x41, 0x0c, 0x49, 0xbe, 0xdc, 0x8f, 0x24, 0x6f, 0xff, 0x76, 0x09, - 0xc6, 0x65, 0x77, 0x1a, 0x9d, 0x3b, 0x11, 0xa1, 0x82, 0x4e, 0xd5, 0xe1, 0x83, 0x4f, 0xe4, 0x4a, - 0x7e, 0x36, 0x4f, 0xdb, 0x33, 0xe6, 0x2c, 0x99, 0xe5, 0x05, 0x49, 0x07, 0x27, 0x24, 0xd1, 0x36, - 0x4c, 0xf9, 0x41, 0xcc, 0x0e, 0x50, 0x05, 0x2f, 0x76, 0x97, 0x92, 0x6e, 0xe7, 0x61, 0xd1, 0xce, - 0xd4, 0xf5, 0x34, 0x3d, 0xdc, 0xdd, 0x04, 0xba, 0x21, 0xad, 0x58, 0x65, 0xd6, 0xd6, 0xd3, 0xc5, - 0xda, 0xea, 0x6d, 0xc4, 0xb2, 0x7f, 0xdf, 0x82, 0xaa, 0x44, 0x3b, 0x8e, 0x4b, 0xb5, 0xdb, 0x30, - 0x14, 0xb1, 0x29, 0x92, 0xc3, 0x75, 0xae, 0xd8, 0x27, 0xf0, 0x79, 0x4d, 0xa4, 0x06, 0xfe, 0x3f, - 0xc2, 0x92, 0x1a, 0x33, 0xe7, 0xab, 0x0f, 0xf9, 0xc0, 0x99, 0xf3, 0x55, 0xcf, 0x7a, 0xdf, 0x9d, - 0x8d, 0x19, 0xf6, 0x06, 0x2a, 0xfa, 0xb6, 0x43, 0xb2, 0xee, 0xdd, 0x4f, 0x8b, 0xbe, 0x75, 0x56, - 0x8a, 0x05, 0x14, 0xad, 0xc3, 0x68, 0x53, 0x1a, 0xbc, 0x13, 0x16, 0xf2, 0xd1, 0x82, 0xb7, 0x0b, - 0xea, 0xa2, 0x8a, 0xfb, 0x4b, 0x2d, 0x69, 0x94, 0xb0, 0x41, 0x97, 0xf2, 0xa9, 0xe4, 0x2e, 0xbe, - 0x5c, 0xd0, 0x34, 0x14, 0x92, 0x38, 0x69, 0xa1, 0xe7, 0x35, 0xbc, 0xfd, 0x55, 0x0b, 0x06, 0xb9, - 0x85, 0xb4, 0x98, 0x99, 0x59, 0xbb, 0x82, 0x4b, 0xc6, 0xf3, 0x16, 0x2d, 0x14, 0x37, 0x72, 0xe8, - 0x36, 0x54, 0xd9, 0x0f, 0x66, 0xed, 0x29, 0x17, 0x71, 0x1e, 0xe3, 0xed, 0xeb, 0x5d, 0xbd, 0x25, - 0x09, 0xe0, 0x84, 0x96, 0xfd, 0x9d, 0x32, 0x65, 0x7d, 0x09, 0xaa, 0x21, 0x3d, 0x58, 0xc7, 0x21, - 0x3d, 0x94, 0x8e, 0x5e, 0x7a, 0x78, 0x17, 0x26, 0x9a, 0xda, 0x15, 0x60, 0x32, 0xe3, 0x17, 0x0b, - 0x2e, 0x2b, 0xed, 0xde, 0x90, 0x5b, 0x04, 0x97, 0x4c, 0x72, 0x38, 0x4d, 0x1f, 0x11, 0x18, 0xe5, - 0xeb, 0x41, 0xb4, 0x57, 0x61, 0xed, 0x9d, 0x2f, 0xb2, 0xc2, 0xf4, 0xc6, 0xd8, 0x2a, 0x6e, 0x68, - 0x84, 0xb0, 0x41, 0xd6, 0xfe, 0xf5, 0x01, 0x18, 0x58, 0xde, 0x26, 0x7e, 0x7c, 0x0c, 0xac, 0x6e, - 0x0b, 0xc6, 0x3d, 0x7f, 0x3b, 0x68, 0x6d, 0x13, 0x97, 0xc3, 
0x0f, 0x77, 0xbc, 0x9f, 0x12, 0x8d, - 0x8c, 0xaf, 0x1a, 0xc4, 0x70, 0x8a, 0xf8, 0x51, 0xd8, 0x22, 0x5e, 0x85, 0x41, 0xbe, 0x32, 0x84, - 0x21, 0x22, 0xe7, 0xc6, 0x80, 0x0d, 0xac, 0xd8, 0x41, 0x89, 0xc5, 0x84, 0x5f, 0x56, 0x08, 0x42, - 0xe8, 0x1d, 0x18, 0x5f, 0xf7, 0xc2, 0x28, 0x5e, 0xf3, 0xb6, 0xa8, 0x0e, 0xb9, 0xd5, 0x3e, 0x84, - 0x15, 0x42, 0x8d, 0xc8, 0x8a, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x0d, 0x18, 0xa3, 0x4a, 0x70, 0xd2, - 0xd4, 0x50, 0xdf, 0x4d, 0x29, 0x23, 0xe4, 0x55, 0x9d, 0x10, 0x36, 0xe9, 0x52, 0x96, 0xd4, 0x64, - 0x4a, 0xf3, 0x30, 0x93, 0x6e, 0x14, 0x4b, 0xe2, 0xda, 0x32, 0x87, 0x51, 0xce, 0xc6, 0x7c, 0x71, - 0xaa, 0x26, 0x67, 0x4b, 0x3c, 0x6e, 0xec, 0xaf, 0xd3, 0xb3, 0x98, 0x8e, 0xe1, 0x31, 0x1c, 0x5f, - 0x97, 0xcd, 0xe3, 0xeb, 0xf1, 0x02, 0x33, 0xdb, 0xe3, 0xe8, 0x7a, 0x1b, 0x46, 0xb4, 0x89, 0x47, - 0xe7, 0xa1, 0xda, 0x94, 0xee, 0x22, 0x82, 0x8b, 0x2b, 0x51, 0x4a, 0xf9, 0x91, 0xe0, 0x04, 0x87, - 0x8e, 0x0b, 0x15, 0x41, 0xd3, 0xce, 0x65, 0x54, 0x40, 0xc5, 0x0c, 0x62, 0x3f, 0x07, 0xb0, 0x7c, - 0x9f, 0x34, 0x17, 0xb8, 0x12, 0xa9, 0xdd, 0x20, 0x5a, 0xbd, 0x6f, 0x10, 0xed, 0xaf, 0x59, 0x30, - 0xbe, 0xb2, 0x64, 0x28, 0x0d, 0xf3, 0x00, 0x5c, 0x36, 0xbe, 0x7d, 0xfb, 0xba, 0xb4, 0x90, 0x73, - 0x33, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0x7a, 0x18, 0xca, 0xad, 0x8e, 0x2f, 0x44, 0xd6, 0xa1, 0xbd, - 0xdd, 0xb9, 0xf2, 0xd5, 0x8e, 0x8f, 0x69, 0x99, 0xe6, 0xc5, 0x55, 0x2e, 0xec, 0xc5, 0x95, 0xef, - 0x02, 0xfd, 0xe5, 0x32, 0x4c, 0xae, 0xb4, 0xc8, 0x7d, 0xa3, 0xd7, 0x4f, 0xc2, 0xa0, 0x1b, 0x7a, - 0xdb, 0x24, 0x4c, 0x0b, 0x02, 0x35, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0x63, 0xd9, 0x5b, 0xdd, 0x07, - 0xf9, 0xd1, 0x39, 0xd5, 0xe5, 0x7e, 0x33, 0x5a, 0x87, 0x21, 0x7e, 0xe3, 0x1c, 0xcd, 0x0c, 0xb0, - 0xa5, 0xf8, 0xf2, 0xc1, 0x9d, 0x49, 0x8f, 0xcf, 0xbc, 0xb0, 0xe0, 0x70, 0x97, 0x1e, 0xc5, 0xcb, - 0x44, 0x29, 0x96, 0xc4, 0x67, 0x3f, 0x01, 0xa3, 0x3a, 0x66, 0x5f, 0xbe, 0x3d, 0x7f, 0xd5, 0x82, - 0x13, 0x2b, 0xad, 0xa0, 0x79, 0x37, 0xe5, 0xf9, 0xf7, 0x02, 0x8c, 0xd0, 0xcd, 0x14, 0x19, 0x6e, - 
0xb1, 0x86, 0xcb, 0xb0, 0x00, 0x61, 0x1d, 0x4f, 0xab, 0x76, 0xf3, 0xe6, 0x6a, 0x2d, 0xcb, 0xd3, - 0x58, 0x80, 0xb0, 0x8e, 0x67, 0xff, 0xa1, 0x05, 0x8f, 0x5e, 0x5a, 0x5a, 0xae, 0x93, 0x30, 0xf2, - 0xa2, 0x98, 0xf8, 0x71, 0x97, 0xb3, 0x33, 0x95, 0x19, 0x5d, 0xad, 0x2b, 0x89, 0xcc, 0x58, 0x63, - 0xbd, 0x10, 0xd0, 0x0f, 0x8a, 0xc7, 0xff, 0x57, 0x2d, 0x38, 0x71, 0xc9, 0x8b, 0x31, 0x69, 0x07, - 0x69, 0x67, 0xe3, 0x90, 0xb4, 0x83, 0xc8, 0x8b, 0x83, 0x70, 0x27, 0xed, 0x6c, 0x8c, 0x15, 0x04, - 0x6b, 0x58, 0xbc, 0xe5, 0x6d, 0x2f, 0xa2, 0x3d, 0x2d, 0x99, 0xaa, 0x2e, 0x16, 0xe5, 0x58, 0x61, - 0xd0, 0x0f, 0x73, 0xbd, 0x90, 0x89, 0x0c, 0x3b, 0x62, 0x07, 0xab, 0x0f, 0xab, 0x49, 0x00, 0x4e, - 0x70, 0xec, 0xbf, 0x6b, 0xc1, 0xc9, 0x4b, 0xad, 0x4e, 0x14, 0x93, 0x70, 0x3d, 0x32, 0x3a, 0xfb, - 0x1c, 0x54, 0x89, 0x14, 0xee, 0x45, 0x5f, 0xd5, 0xa1, 0xa1, 0xa4, 0x7e, 0xee, 0xe9, 0xac, 0xf0, - 0x0a, 0x38, 0xd4, 0xf6, 0xe7, 0xfe, 0xf9, 0xbb, 0x25, 0x18, 0xbb, 0xbc, 0xb6, 0x56, 0xbf, 0x44, - 0x62, 0xc1, 0x25, 0xf3, 0xcd, 0x5e, 0x58, 0xd3, 0xc8, 0x0f, 0x12, 0x7e, 0x3a, 0xb1, 0xd7, 0x9a, - 0xe7, 0xd1, 0x28, 0xf3, 0xab, 0x7e, 0x7c, 0x23, 0x6c, 0xc4, 0xa1, 0xe7, 0x6f, 0x64, 0xea, 0xf0, - 0x92, 0x97, 0x97, 0x7b, 0xf1, 0x72, 0xf4, 0x1c, 0x0c, 0xb2, 0x70, 0x18, 0x29, 0x7c, 0x7c, 0x58, - 0xc9, 0x09, 0xac, 0x74, 0x7f, 0x77, 0xae, 0x7a, 0x13, 0xaf, 0xf2, 0x3f, 0x58, 0xa0, 0xa2, 0xb7, - 0x60, 0x64, 0x33, 0x8e, 0xdb, 0x97, 0x89, 0xe3, 0x92, 0x50, 0xf2, 0x89, 0xb3, 0x07, 0xf3, 0x09, - 0x3a, 0x1c, 0xbc, 0x42, 0xb2, 0xb5, 0x92, 0xb2, 0x08, 0xeb, 0x14, 0xed, 0x06, 0x40, 0x02, 0x7b, - 0x40, 0x3a, 0x88, 0xfd, 0xcb, 0x25, 0x18, 0xba, 0xec, 0xf8, 0x6e, 0x8b, 0x84, 0x68, 0x05, 0x2a, - 0xe4, 0x3e, 0x69, 0x8a, 0x83, 0x3c, 0xa7, 0xeb, 0xc9, 0x61, 0xc7, 0x2d, 0x77, 0xf4, 0x3f, 0x66, - 0xf5, 0x11, 0x86, 0x21, 0xda, 0xef, 0x4b, 0xca, 0x0f, 0xfd, 0x99, 0xfc, 0x51, 0x50, 0x8b, 0x82, - 0x9f, 0x94, 0xa2, 0x08, 0x4b, 0x42, 0xcc, 0x02, 0xd5, 0x6c, 0x37, 0x28, 0x7b, 0x8b, 0x8b, 0x69, - 0x76, 0x6b, 0x4b, 0x75, 0x8e, 0x2e, 
0xe8, 0x72, 0x0b, 0x94, 0x2c, 0xc4, 0x09, 0x39, 0x7b, 0x0d, - 0xaa, 0x74, 0xf2, 0x17, 0x5a, 0x9e, 0x73, 0xb0, 0x19, 0xec, 0x19, 0xa8, 0x4a, 0x43, 0x54, 0x24, - 0x9c, 0xda, 0x19, 0x55, 0x69, 0xa7, 0x8a, 0x70, 0x02, 0xb7, 0x5f, 0x82, 0x69, 0x76, 0x8f, 0xec, - 0xc4, 0x9b, 0xc6, 0x5e, 0xcc, 0x5d, 0xf4, 0xf6, 0x37, 0x2a, 0x30, 0xb5, 0xda, 0x58, 0x6a, 0x98, - 0x36, 0xcf, 0x97, 0x60, 0x94, 0x1f, 0xfb, 0x74, 0x29, 0x3b, 0x2d, 0x51, 0x5f, 0xdd, 0x7d, 0xac, - 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x47, 0xa1, 0xec, 0xbd, 0xeb, 0xa7, 0xbd, 0x11, 0x57, 0x5f, 0xbd, - 0x8e, 0x69, 0x39, 0x05, 0x53, 0x09, 0x82, 0xb3, 0x4e, 0x05, 0x56, 0x52, 0xc4, 0x2b, 0x30, 0xee, - 0x45, 0xcd, 0xc8, 0x5b, 0xf5, 0x29, 0x5f, 0x71, 0x9a, 0x72, 0x53, 0x24, 0x22, 0x3f, 0xed, 0xaa, - 0x82, 0xe2, 0x14, 0xb6, 0xc6, 0xc7, 0x07, 0x0a, 0x4b, 0x21, 0xb9, 0x6e, 0xee, 0x54, 0xc0, 0x6a, - 0xb3, 0xaf, 0x8b, 0x98, 0x6f, 0x93, 0x10, 0xb0, 0xf8, 0x07, 0x47, 0x58, 0xc2, 0xd0, 0x25, 0x98, - 0x6a, 0x6e, 0x3a, 0xed, 0x85, 0x4e, 0xbc, 0x59, 0xf3, 0xa2, 0x66, 0xb0, 0x4d, 0xc2, 0x1d, 0x26, - 0x00, 0x0f, 0x27, 0x36, 0x2d, 0x05, 0x58, 0xba, 0xbc, 0x50, 0xa7, 0x98, 0xb8, 0xbb, 0x8e, 0x29, - 0x90, 0xc0, 0x11, 0x08, 0x24, 0x0b, 0x30, 0x21, 0x5b, 0x6d, 0x90, 0x88, 0x1d, 0x11, 0x23, 0xac, - 0x9f, 0x2a, 0xc0, 0x48, 0x14, 0xab, 0x5e, 0xa6, 0xf1, 0xed, 0x77, 0xa0, 0xaa, 0x7c, 0xf1, 0xa4, - 0x0b, 0xaa, 0xd5, 0xc3, 0x05, 0x35, 0x9f, 0xb9, 0x4b, 0xeb, 0x7c, 0x39, 0xd3, 0x3a, 0xff, 0x4f, - 0x2d, 0x48, 0x9c, 0x89, 0x10, 0x86, 0x6a, 0x3b, 0x60, 0x37, 0x79, 0xa1, 0xbc, 0x32, 0x7f, 0x22, - 0x67, 0xcf, 0x73, 0x9e, 0xc3, 0x07, 0xa4, 0x2e, 0xeb, 0xe2, 0x84, 0x0c, 0xba, 0x0a, 0x43, 0xed, - 0x90, 0x34, 0x62, 0x16, 0x3f, 0xd2, 0x07, 0x45, 0xbe, 0x10, 0x78, 0x4d, 0x2c, 0x49, 0xd8, 0xff, - 0xd2, 0x02, 0xe0, 0x66, 0x70, 0xc7, 0xdf, 0x20, 0xc7, 0xa0, 0x58, 0x5f, 0x87, 0x4a, 0xd4, 0x26, - 0xcd, 0x62, 0x77, 0xb1, 0x49, 0xcf, 0x1a, 0x6d, 0xd2, 0x4c, 0xa6, 0x83, 0xfe, 0xc3, 0x8c, 0x8e, - 0xfd, 0x6d, 0x80, 0xf1, 0x04, 0x8d, 0x2a, 0x37, 0xe8, 0x59, 0x23, 0x70, 
0xe2, 0xe1, 0x54, 0xe0, - 0x44, 0x95, 0x61, 0x6b, 0xb1, 0x12, 0x31, 0x94, 0xb7, 0x9c, 0xfb, 0x42, 0x97, 0x7a, 0xa1, 0x68, - 0x87, 0x68, 0x4b, 0xf3, 0xd7, 0x9c, 0xfb, 0x5c, 0x74, 0x7d, 0x46, 0x2e, 0xa4, 0x6b, 0xce, 0xfd, - 0x7d, 0x7e, 0xe3, 0xca, 0xb8, 0x13, 0x55, 0xde, 0x3e, 0xfb, 0x67, 0xc9, 0x7f, 0x76, 0x0c, 0xd1, - 0xe6, 0x58, 0xab, 0x9e, 0x2f, 0x4c, 0xc1, 0x7d, 0xb6, 0xea, 0xf9, 0xe9, 0x56, 0x3d, 0xbf, 0x40, - 0xab, 0x1e, 0xf3, 0x30, 0x1e, 0x12, 0x77, 0x34, 0xcc, 0x3d, 0x73, 0xe4, 0xe2, 0xc7, 0xfb, 0x6a, - 0x5a, 0x5c, 0xf6, 0xf0, 0xe6, 0xcf, 0x4b, 0x79, 0x5d, 0x94, 0xe6, 0x76, 0x41, 0x36, 0x8d, 0xfe, - 0x9e, 0x05, 0xe3, 0xe2, 0x37, 0x26, 0xef, 0x76, 0x48, 0x14, 0x0b, 0xb9, 0xe0, 0x53, 0x87, 0xe9, - 0x8d, 0x20, 0xc1, 0x3b, 0xf5, 0x31, 0xc9, 0x7e, 0x4d, 0x60, 0x6e, 0xdf, 0x52, 0xfd, 0x41, 0xdf, - 0xb6, 0x60, 0x7a, 0xcb, 0xb9, 0xcf, 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xbd, 0x40, 0xb8, 0xa0, 0xae, - 0xf4, 0xbb, 0x4e, 0xba, 0x08, 0xf1, 0xee, 0x4a, 0xef, 0xb2, 0xe9, 0x2c, 0x94, 0xdc, 0x4e, 0x67, - 0xf6, 0x70, 0x76, 0x1d, 0x86, 0xe5, 0xc2, 0xcc, 0xd0, 0x94, 0x6a, 0xba, 0xf8, 0xd3, 0xf7, 0x05, - 0x9a, 0xa6, 0x59, 0xb1, 0x76, 0xc4, 0x52, 0x3c, 0xd2, 0x76, 0xde, 0x81, 0x51, 0x7d, 0xdd, 0x1d, - 0x69, 0x5b, 0xef, 0xc2, 0x89, 0x8c, 0x55, 0x75, 0xa4, 0x4d, 0xde, 0x83, 0x87, 0x7b, 0xae, 0x8f, - 0xa3, 0x6c, 0xd8, 0xfe, 0x5d, 0x4b, 0x67, 0x9d, 0xc7, 0x60, 0xb7, 0xba, 0x66, 0xda, 0xad, 0xce, - 0x16, 0xdd, 0x43, 0x3d, 0x8c, 0x57, 0xeb, 0x7a, 0xf7, 0xe9, 0x91, 0x80, 0xd6, 0x60, 0xb0, 0x45, - 0x4b, 0xe4, 0xb5, 0xe1, 0xb9, 0x7e, 0x76, 0x69, 0x22, 0x81, 0xb1, 0xf2, 0x08, 0x0b, 0x5a, 0xf6, - 0xb7, 0x2d, 0xa8, 0xfc, 0x25, 0x86, 0x75, 0x75, 0x91, 0x16, 0xa9, 0x09, 0xe6, 0xb1, 0x73, 0x6f, - 0xf9, 0x7e, 0x4c, 0xfc, 0x88, 0x89, 0xf1, 0x99, 0x43, 0xf4, 0x7f, 0x4a, 0x30, 0x42, 0x9b, 0x92, - 0x9e, 0x32, 0x2f, 0xc3, 0x58, 0xcb, 0xb9, 0x43, 0x5a, 0xd2, 0xe6, 0x9e, 0x56, 0x7a, 0xaf, 0xea, - 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0xeb, 0xfa, 0x95, 0x84, 0x10, 0x92, 0x54, 0x65, 0xe3, 0xbe, 0x02, - 0x9b, 0xb8, 
0x54, 0xeb, 0xba, 0xe7, 0xc4, 0xcd, 0x4d, 0xa1, 0x10, 0xab, 0xee, 0xde, 0xa6, 0x85, - 0x98, 0xc3, 0xa8, 0xb0, 0x27, 0x57, 0xec, 0x2d, 0x12, 0x32, 0x61, 0x8f, 0x0b, 0xd5, 0x4a, 0xd8, - 0xc3, 0x26, 0x18, 0xa7, 0xf1, 0xd1, 0x27, 0x60, 0x9c, 0x0e, 0x4e, 0xd0, 0x89, 0xa5, 0x1f, 0xd0, - 0x00, 0xf3, 0x03, 0x62, 0x6e, 0xe4, 0x6b, 0x06, 0x04, 0xa7, 0x30, 0x51, 0x1d, 0xa6, 0x3d, 0xbf, - 0xd9, 0xea, 0xb8, 0xe4, 0xa6, 0xef, 0xf9, 0x5e, 0xec, 0x39, 0x2d, 0xef, 0x3d, 0xe2, 0x0a, 0xb1, - 0x5b, 0xb9, 0x6c, 0xad, 0x66, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5b, 0x70, 0xe2, 0x6a, 0xe0, 0xb8, - 0x8b, 0x4e, 0xcb, 0xf1, 0x9b, 0x24, 0x5c, 0xf5, 0x37, 0x72, 0x7d, 0x0a, 0xf4, 0x7b, 0xff, 0x52, - 0xde, 0xbd, 0xbf, 0x1d, 0x02, 0xd2, 0x1b, 0x10, 0x3e, 0x71, 0x6f, 0xc0, 0x90, 0xc7, 0x9b, 0x12, - 0x1b, 0xe1, 0x42, 0x9e, 0x4c, 0xde, 0xd5, 0x47, 0xcd, 0xc7, 0x8b, 0x17, 0x60, 0x49, 0x92, 0x6a, - 0x70, 0x59, 0x42, 0x7c, 0xbe, 0xea, 0x6d, 0xbf, 0x00, 0x53, 0xac, 0x66, 0x9f, 0x8a, 0xdf, 0x5f, - 0xb3, 0x60, 0xe2, 0x7a, 0x2a, 0x00, 0xfa, 0x49, 0x18, 0x8c, 0x48, 0x98, 0x61, 0x59, 0x6d, 0xb0, - 0x52, 0x2c, 0xa0, 0x0f, 0xdc, 0x5a, 0xf3, 0x6b, 0x25, 0xa8, 0x32, 0xa7, 0xec, 0x36, 0x55, 0xe2, - 0x8e, 0x5e, 0x5e, 0xbe, 0x66, 0xc8, 0xcb, 0x39, 0x16, 0x03, 0xd5, 0xb1, 0x5e, 0xe2, 0x32, 0xba, - 0xa9, 0x02, 0x83, 0x0b, 0x19, 0x0b, 0x12, 0x82, 0x3c, 0x78, 0x74, 0xdc, 0x8c, 0x23, 0x96, 0x41, - 0xc3, 0xec, 0x02, 0x5f, 0xe1, 0x7e, 0xe0, 0x2e, 0xf0, 0x55, 0xcf, 0x7a, 0x70, 0xc9, 0xba, 0xd6, - 0x79, 0x76, 0x8e, 0xfc, 0x1c, 0x73, 0xb5, 0x65, 0x7b, 0x58, 0xc5, 0xd7, 0xcf, 0x09, 0xd7, 0x59, - 0x51, 0xba, 0xcf, 0x18, 0x9e, 0xf8, 0xc7, 0xd3, 0x27, 0x24, 0x55, 0xec, 0xcb, 0x30, 0x91, 0x1a, - 0x3a, 0xf4, 0x02, 0x0c, 0xb4, 0x37, 0x9d, 0x88, 0xa4, 0x9c, 0x9e, 0x06, 0xea, 0xb4, 0x70, 0x7f, - 0x77, 0x6e, 0x5c, 0x55, 0x60, 0x25, 0x98, 0x63, 0xdb, 0x9f, 0x2b, 0x41, 0xe5, 0x7a, 0xe0, 0x1e, - 0xc7, 0x52, 0xbb, 0x6c, 0x2c, 0xb5, 0x27, 0xf3, 0xf3, 0xb5, 0xf4, 0x5c, 0x65, 0xf5, 0xd4, 0x2a, - 0x3b, 0x5b, 0x80, 0xd6, 0xc1, 0x0b, 0x6c, 0x0b, 
0x46, 0x58, 0x3e, 0x18, 0xe1, 0x94, 0xf5, 0x9c, - 0xa1, 0xe2, 0xcd, 0xa5, 0x54, 0xbc, 0x09, 0x0d, 0x55, 0x53, 0xf4, 0x9e, 0x82, 0x21, 0xe1, 0x04, - 0x94, 0x76, 0x34, 0x16, 0xb8, 0x58, 0xc2, 0xed, 0x7f, 0x51, 0x06, 0x23, 0xff, 0x0c, 0xfa, 0x7d, - 0x0b, 0xe6, 0x43, 0x1e, 0xb4, 0xe5, 0xd6, 0x3a, 0xa1, 0xe7, 0x6f, 0x34, 0x9a, 0x9b, 0xc4, 0xed, - 0xb4, 0x3c, 0x7f, 0x63, 0x75, 0xc3, 0x0f, 0x54, 0xf1, 0xf2, 0x7d, 0xd2, 0xec, 0x30, 0x9b, 0x7b, - 0xe1, 0xb4, 0x37, 0xea, 0x02, 0xfc, 0xe2, 0xde, 0xee, 0xdc, 0x3c, 0xee, 0xab, 0x15, 0xdc, 0x67, - 0xaf, 0xd0, 0x0f, 0x2d, 0x38, 0xcf, 0x33, 0xb0, 0x14, 0xff, 0x92, 0x42, 0xaa, 0x71, 0x5d, 0x12, - 0x4d, 0xc8, 0xad, 0x91, 0x70, 0x6b, 0xf1, 0x45, 0x31, 0xc8, 0xe7, 0xeb, 0xfd, 0xb5, 0x8a, 0xfb, - 0xed, 0xa6, 0xfd, 0xaf, 0xcb, 0x30, 0x46, 0xc7, 0x33, 0x49, 0xa1, 0xf0, 0x82, 0xb1, 0x4c, 0x1e, - 0x4b, 0x2d, 0x93, 0x29, 0x03, 0xf9, 0xc1, 0x64, 0x4f, 0x88, 0x60, 0xaa, 0xe5, 0x44, 0xf1, 0x65, + 0x02, 0x02, 0x27, 0x80, 0x02, 0x03, 0x71, 0x94, 0xc4, 0xf9, 0x50, 0x04, 0x7d, 0x18, 0x96, 0x13, + 0xc4, 0x91, 0x1c, 0x39, 0x48, 0x1c, 0x01, 0x46, 0x62, 0x05, 0x86, 0xd7, 0xd6, 0x0a, 0xf1, 0x9f, + 0x00, 0xf9, 0x91, 0xfc, 0xdb, 0x7c, 0x20, 0xa8, 0xcf, 0xae, 0xea, 0xe9, 0xd9, 0xee, 0x59, 0xde, + 0xae, 0x29, 0x21, 0xff, 0x66, 0xde, 0x7b, 0xf5, 0xaa, 0xba, 0x3e, 0x5e, 0xbd, 0x7a, 0xf5, 0xde, + 0x2b, 0x38, 0x77, 0xf7, 0xa5, 0x68, 0xde, 0x0b, 0xce, 0xdf, 0xed, 0xdc, 0x21, 0xa1, 0x4f, 0x62, + 0x12, 0x9d, 0x6f, 0xdf, 0xdd, 0x38, 0xef, 0xb4, 0xbd, 0xf3, 0xdb, 0x17, 0xce, 0x6f, 0x10, 0x9f, + 0x84, 0x4e, 0x4c, 0xdc, 0xf9, 0x76, 0x18, 0xc4, 0x01, 0x7a, 0x84, 0x53, 0xcf, 0x27, 0xd4, 0xf3, + 0xed, 0xbb, 0x1b, 0xf3, 0x4e, 0xdb, 0x9b, 0xdf, 0xbe, 0x30, 0xfb, 0xec, 0x86, 0x17, 0x6f, 0x76, + 0xee, 0xcc, 0x37, 0x83, 0xad, 0xf3, 0x1b, 0xc1, 0x46, 0x70, 0x9e, 0x15, 0xba, 0xd3, 0x59, 0x67, + 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x33, 0x9b, 0x7d, 0x5e, 0x54, 0xed, 0xb4, 0xbd, 0x2d, 0xa7, 0xb9, + 0xe9, 0xf9, 0x24, 0xdc, 0x51, 0x95, 0x87, 0x24, 0x0a, 0x3a, 0x61, 0x93, 0xa4, 0x9b, 
0x70, 0x60, + 0xa9, 0xe8, 0xfc, 0x16, 0x89, 0x9d, 0x8c, 0x86, 0xcf, 0x9e, 0xef, 0x55, 0x2a, 0xec, 0xf8, 0xb1, + 0xb7, 0xd5, 0x5d, 0xcd, 0xc7, 0xf2, 0x0a, 0x44, 0xcd, 0x4d, 0xb2, 0xe5, 0x74, 0x95, 0x7b, 0xae, + 0x57, 0xb9, 0x4e, 0xec, 0xb5, 0xce, 0x7b, 0x7e, 0x1c, 0xc5, 0x61, 0xba, 0x90, 0xfd, 0xc7, 0x16, + 0x9c, 0x59, 0xb8, 0xdd, 0x58, 0x6e, 0x39, 0x51, 0xec, 0x35, 0x17, 0x5b, 0x41, 0xf3, 0x6e, 0x23, + 0x0e, 0x42, 0x72, 0x2b, 0x68, 0x75, 0xb6, 0x48, 0x83, 0x75, 0x04, 0x3a, 0x07, 0xc3, 0xdb, 0xec, + 0xff, 0x6a, 0x6d, 0xc6, 0x3a, 0x63, 0x9d, 0xad, 0x2e, 0x4e, 0x7e, 0x7f, 0x77, 0xee, 0x43, 0x7b, + 0xbb, 0x73, 0xc3, 0xb7, 0x04, 0x1c, 0x2b, 0x0a, 0xf4, 0x24, 0x0c, 0xae, 0x47, 0x6b, 0x3b, 0x6d, + 0x32, 0x53, 0x62, 0xb4, 0xe3, 0x82, 0x76, 0x70, 0xa5, 0x41, 0xa1, 0x58, 0x60, 0xd1, 0x79, 0xa8, + 0xb6, 0x9d, 0x30, 0xf6, 0x62, 0x2f, 0xf0, 0x67, 0xca, 0x67, 0xac, 0xb3, 0x03, 0x8b, 0x53, 0x82, + 0xb4, 0x5a, 0x97, 0x08, 0x9c, 0xd0, 0xd0, 0x66, 0x84, 0xc4, 0x71, 0x6f, 0xf8, 0xad, 0x9d, 0x99, + 0xca, 0x19, 0xeb, 0xec, 0x70, 0xd2, 0x0c, 0x2c, 0xe0, 0x58, 0x51, 0xd8, 0xdf, 0x2e, 0xc1, 0xf0, + 0xc2, 0xfa, 0xba, 0xe7, 0x7b, 0xf1, 0x0e, 0x7a, 0x1b, 0x46, 0xfd, 0xc0, 0x25, 0xf2, 0x3f, 0xfb, + 0x8a, 0x91, 0x8b, 0x4f, 0xcf, 0x1f, 0x34, 0xa9, 0xe6, 0xaf, 0x6b, 0x25, 0x16, 0x27, 0xf7, 0x76, + 0xe7, 0x46, 0x75, 0x08, 0x36, 0x38, 0xa2, 0x37, 0x60, 0xa4, 0x1d, 0xb8, 0xaa, 0x82, 0x12, 0xab, + 0xe0, 0xa9, 0x83, 0x2b, 0xa8, 0x27, 0x05, 0x16, 0x27, 0xf6, 0x76, 0xe7, 0x46, 0x34, 0x00, 0xd6, + 0xd9, 0xa1, 0x16, 0x4c, 0xd0, 0xbf, 0x7e, 0xec, 0xa9, 0x1a, 0xca, 0xac, 0x86, 0x67, 0xf3, 0x6b, + 0xd0, 0x0a, 0x2d, 0x9e, 0xd8, 0xdb, 0x9d, 0x9b, 0x48, 0x01, 0x71, 0x9a, 0xb5, 0xfd, 0x1e, 0x8c, + 0x2f, 0xc4, 0xb1, 0xd3, 0xdc, 0x24, 0x2e, 0x1f, 0x5f, 0xf4, 0x3c, 0x54, 0x7c, 0x67, 0x8b, 0x88, + 0xd1, 0x3f, 0x23, 0xba, 0xbd, 0x72, 0xdd, 0xd9, 0x22, 0xfb, 0xbb, 0x73, 0x93, 0x37, 0x7d, 0xef, + 0xdd, 0x8e, 0x98, 0x33, 0x14, 0x86, 0x19, 0x35, 0xba, 0x08, 0xe0, 0x92, 0x6d, 0xaf, 0x49, 0xea, + 0x4e, 0xbc, 0x29, 0x66, 
0x03, 0x12, 0x65, 0xa1, 0xa6, 0x30, 0x58, 0xa3, 0xb2, 0x3f, 0x6b, 0x41, + 0x75, 0x61, 0x3b, 0xf0, 0xdc, 0x7a, 0xe0, 0x46, 0xa8, 0x03, 0x13, 0xed, 0x90, 0xac, 0x93, 0x50, + 0x81, 0x66, 0xac, 0x33, 0xe5, 0xb3, 0x23, 0x17, 0x2f, 0xe6, 0x7c, 0xb7, 0x59, 0x68, 0xd9, 0x8f, + 0xc3, 0x9d, 0xc5, 0x87, 0x44, 0xd5, 0x13, 0x29, 0x2c, 0x4e, 0xd7, 0x61, 0x7f, 0xb7, 0x04, 0x27, + 0x17, 0xde, 0xeb, 0x84, 0xa4, 0xe6, 0x45, 0x77, 0xd3, 0x4b, 0xc1, 0xf5, 0xa2, 0xbb, 0xd7, 0x93, + 0xce, 0x50, 0x73, 0xb0, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x67, 0x61, 0x88, 0xfe, 0xbe, 0x89, 0x57, + 0xc5, 0xd7, 0x9f, 0x10, 0xc4, 0x23, 0x35, 0x27, 0x76, 0x6a, 0x1c, 0x85, 0x25, 0x0d, 0xba, 0x06, + 0x23, 0x4d, 0xb6, 0x72, 0x37, 0xae, 0x05, 0x2e, 0x61, 0x23, 0x5c, 0x5d, 0x7c, 0x86, 0x92, 0x2f, + 0x25, 0xe0, 0xfd, 0xdd, 0xb9, 0x19, 0xde, 0x36, 0xc1, 0x42, 0xc3, 0x61, 0xbd, 0x3c, 0xb2, 0xd5, + 0x42, 0xac, 0x30, 0x4e, 0x90, 0xb1, 0x08, 0xcf, 0x6a, 0x6b, 0x6a, 0x80, 0xad, 0xa9, 0xd1, 0xec, + 0xf5, 0x84, 0x2e, 0x40, 0xe5, 0xae, 0xe7, 0xbb, 0x33, 0x83, 0x8c, 0xd7, 0xa3, 0x74, 0xf8, 0xaf, + 0x78, 0xbe, 0xbb, 0xbf, 0x3b, 0x37, 0x65, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0x91, 0x25, + 0xba, 0x71, 0xc5, 0x6b, 0x99, 0x12, 0xe5, 0x22, 0x40, 0x44, 0x9a, 0x21, 0x89, 0xb5, 0x8e, 0x54, + 0x33, 0xa3, 0xa1, 0x30, 0x58, 0xa3, 0xa2, 0xf2, 0x22, 0xda, 0x74, 0x42, 0x36, 0xc1, 0x44, 0x77, + 0x2a, 0x79, 0xd1, 0x90, 0x08, 0x9c, 0xd0, 0x18, 0xf2, 0xa2, 0x9c, 0x2b, 0x2f, 0x7e, 0xcf, 0x82, + 0xa1, 0x45, 0xcf, 0x77, 0x3d, 0x7f, 0x03, 0xbd, 0x0d, 0xc3, 0x54, 0x9c, 0xbb, 0x4e, 0xec, 0x08, + 0x51, 0xf1, 0x51, 0x39, 0xdf, 0x74, 0xe9, 0x2a, 0x67, 0x5c, 0x34, 0x4f, 0xa9, 0xe9, 0xbc, 0xbb, + 0x71, 0xe7, 0x1d, 0xd2, 0x8c, 0xaf, 0x91, 0xd8, 0x49, 0x3e, 0x27, 0x81, 0x61, 0xc5, 0x15, 0xdd, + 0x84, 0xc1, 0xd8, 0x09, 0x37, 0x48, 0x2c, 0x24, 0x45, 0xce, 0x3a, 0xe6, 0x3c, 0x30, 0x9d, 0xa5, + 0xc4, 0x6f, 0x92, 0x44, 0xa6, 0xae, 0x31, 0x26, 0x58, 0x30, 0xb3, 0x9b, 0x30, 0xba, 0xe4, 0xb4, + 0x9d, 0x3b, 0x5e, 0xcb, 0x8b, 0x3d, 0x12, 0xa1, 0x9f, 0x81, 
0xb2, 0xe3, 0xba, 0x6c, 0xcd, 0x54, + 0x17, 0x4f, 0xee, 0xed, 0xce, 0x95, 0x17, 0x5c, 0x3a, 0x64, 0xa0, 0xa8, 0x76, 0x30, 0xa5, 0x40, + 0x4f, 0x43, 0xc5, 0x0d, 0x83, 0xf6, 0x4c, 0x89, 0x51, 0x9e, 0xa2, 0xa3, 0x5b, 0x0b, 0x83, 0x76, + 0x8a, 0x94, 0xd1, 0xd8, 0xdf, 0x2b, 0x01, 0x5a, 0x22, 0xed, 0xcd, 0x95, 0x86, 0x31, 0xa6, 0x67, + 0x61, 0x78, 0x2b, 0xf0, 0xbd, 0x38, 0x08, 0x23, 0x51, 0x21, 0x9b, 0x4a, 0xd7, 0x04, 0x0c, 0x2b, + 0x2c, 0x3a, 0x03, 0x95, 0x76, 0x22, 0x11, 0x46, 0xa5, 0x34, 0x61, 0xb2, 0x80, 0x61, 0x28, 0x45, + 0x27, 0x22, 0xa1, 0x58, 0x02, 0x8a, 0xe2, 0x66, 0x44, 0x42, 0xcc, 0x30, 0xc9, 0x0c, 0xa2, 0x73, + 0x4b, 0x4c, 0xf0, 0xd4, 0x0c, 0xa2, 0x18, 0xac, 0x51, 0xa1, 0xb7, 0xa0, 0xca, 0xff, 0x61, 0xb2, + 0xce, 0x66, 0x7b, 0xae, 0x1c, 0xb9, 0x1a, 0x34, 0x9d, 0x56, 0xba, 0xf3, 0xc7, 0xd8, 0x8c, 0x93, + 0x8c, 0x70, 0xc2, 0xd3, 0x98, 0x71, 0x83, 0xb9, 0x33, 0xee, 0x6f, 0x5b, 0x80, 0x96, 0x3c, 0xdf, + 0x25, 0xe1, 0x31, 0xec, 0xb6, 0xfd, 0x2d, 0x86, 0x3f, 0xa1, 0x4d, 0x0b, 0xb6, 0xda, 0x81, 0x4f, + 0xfc, 0x78, 0x29, 0xf0, 0x5d, 0xbe, 0x03, 0x7f, 0x02, 0x2a, 0x31, 0xad, 0x8a, 0x37, 0xeb, 0x49, + 0x39, 0x2c, 0xb4, 0x82, 0xfd, 0xdd, 0xb9, 0x53, 0xdd, 0x25, 0x58, 0x13, 0x58, 0x19, 0xf4, 0x71, + 0x18, 0x8c, 0x62, 0x27, 0xee, 0x44, 0xa2, 0xa1, 0x8f, 0xc9, 0x86, 0x36, 0x18, 0x74, 0x7f, 0x77, + 0x6e, 0x42, 0x15, 0xe3, 0x20, 0x2c, 0x0a, 0xa0, 0xa7, 0x60, 0x68, 0x8b, 0x44, 0x91, 0xb3, 0x21, + 0x65, 0xe2, 0x84, 0x28, 0x3b, 0x74, 0x8d, 0x83, 0xb1, 0xc4, 0xa3, 0xc7, 0x61, 0x80, 0x84, 0x61, + 0x10, 0x8a, 0x19, 0x31, 0x26, 0x08, 0x07, 0x96, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x5f, 0x2c, 0x98, + 0x50, 0x6d, 0xe5, 0x75, 0x1d, 0xc3, 0x92, 0x77, 0x01, 0x9a, 0xf2, 0x03, 0x23, 0xb6, 0xd0, 0xb4, + 0x3a, 0xb2, 0xa7, 0x5f, 0x77, 0x87, 0x26, 0x75, 0x28, 0x50, 0x84, 0x35, 0xbe, 0xf6, 0xbf, 0xb5, + 0xe0, 0x44, 0xea, 0xdb, 0xae, 0x7a, 0x51, 0x8c, 0xde, 0xe8, 0xfa, 0xbe, 0xf9, 0x62, 0xdf, 0x47, + 0x4b, 0xb3, 0xaf, 0x53, 0xf3, 0x45, 0x42, 0xb4, 0x6f, 0xc3, 0x30, 0xe0, 0xc5, 0x64, 0x4b, 0x7e, + 
0xd6, 0xb3, 0x05, 0x3f, 0x8b, 0xb7, 0x2f, 0x19, 0xa5, 0x55, 0xca, 0x03, 0x73, 0x56, 0xf6, 0xff, + 0xb2, 0xa0, 0xba, 0x14, 0xf8, 0xeb, 0xde, 0xc6, 0x35, 0xa7, 0x7d, 0x0c, 0xe3, 0xd3, 0x80, 0x0a, + 0xe3, 0xce, 0x3f, 0xe1, 0x42, 0xde, 0x27, 0x88, 0x86, 0xcd, 0xd3, 0x7d, 0x8f, 0xeb, 0x17, 0x4a, + 0x4c, 0x51, 0x10, 0x66, 0xcc, 0x66, 0x5f, 0x84, 0xaa, 0x22, 0x40, 0x93, 0x50, 0xbe, 0x4b, 0xb8, + 0xf2, 0x59, 0xc5, 0xf4, 0x27, 0x9a, 0x86, 0x81, 0x6d, 0xa7, 0xd5, 0x11, 0x8b, 0x17, 0xf3, 0x3f, + 0x9f, 0x28, 0xbd, 0x64, 0xd9, 0xdf, 0x63, 0x2b, 0x50, 0x54, 0xb2, 0xec, 0x6f, 0x0b, 0xe1, 0xf0, + 0x39, 0x0b, 0xa6, 0x5b, 0x19, 0x42, 0x49, 0xf4, 0xc9, 0x61, 0xc4, 0xd9, 0x23, 0xa2, 0xd9, 0xd3, + 0x59, 0x58, 0x9c, 0x59, 0x1b, 0x95, 0xf5, 0x41, 0x9b, 0x4e, 0x38, 0xa7, 0xc5, 0x9a, 0x2e, 0xd4, + 0x86, 0x1b, 0x02, 0x86, 0x15, 0xd6, 0xfe, 0x0b, 0x0b, 0xa6, 0xd5, 0x77, 0x5c, 0x21, 0x3b, 0x0d, + 0xd2, 0x22, 0xcd, 0x38, 0x08, 0x3f, 0x28, 0x5f, 0xf2, 0x28, 0x1f, 0x13, 0x2e, 0x93, 0x46, 0x04, + 0x83, 0xf2, 0x15, 0xb2, 0xc3, 0x07, 0x48, 0xff, 0xd0, 0xf2, 0x81, 0x1f, 0xfa, 0x3b, 0x16, 0x8c, + 0xa9, 0x0f, 0x3d, 0x86, 0x25, 0x77, 0xd5, 0x5c, 0x72, 0x3f, 0x53, 0x70, 0xbe, 0xf6, 0x58, 0x6c, + 0x7f, 0xab, 0x44, 0xc5, 0x86, 0xa0, 0xa9, 0x87, 0x01, 0xed, 0x24, 0x2a, 0xf1, 0x3f, 0x20, 0xa3, + 0xd4, 0xdf, 0xc7, 0x5e, 0x21, 0x3b, 0x6b, 0x01, 0xd5, 0x26, 0xb2, 0x3f, 0xd6, 0x18, 0xd4, 0xca, + 0x81, 0x83, 0xfa, 0x07, 0x25, 0x38, 0xa9, 0xba, 0xc5, 0xd8, 0xa5, 0x7f, 0x2a, 0x3b, 0xe6, 0x02, + 0x8c, 0xb8, 0x64, 0xdd, 0xe9, 0xb4, 0x62, 0x75, 0x00, 0x19, 0xe0, 0x27, 0xd3, 0x5a, 0x02, 0xc6, + 0x3a, 0x4d, 0x1f, 0x7d, 0xf9, 0x95, 0x11, 0x26, 0xcf, 0x63, 0x87, 0xce, 0x7a, 0xaa, 0xe1, 0x69, + 0x27, 0xca, 0x51, 0xfd, 0x44, 0x29, 0x4e, 0x8f, 0x8f, 0xc3, 0x80, 0xb7, 0x45, 0xf7, 0xfc, 0x92, + 0xb9, 0x95, 0xaf, 0x52, 0x20, 0xe6, 0x38, 0xf4, 0x04, 0x0c, 0x35, 0x83, 0xad, 0x2d, 0xc7, 0x77, + 0x67, 0xca, 0x4c, 0xe7, 0x1c, 0xa1, 0x6a, 0xc1, 0x12, 0x07, 0x61, 0x89, 0x43, 0x8f, 0x40, 0xc5, + 0x09, 0x37, 0xa2, 0x99, 0x0a, 0xa3, 
0x19, 0xa6, 0x35, 0x2d, 0x84, 0x1b, 0x11, 0x66, 0x50, 0xaa, + 0x4b, 0xde, 0x0b, 0xc2, 0xbb, 0x9e, 0xbf, 0x51, 0xf3, 0x42, 0xa6, 0x18, 0x6a, 0xba, 0xe4, 0x6d, + 0x85, 0xc1, 0x1a, 0x15, 0xaa, 0xc3, 0x40, 0x3b, 0x08, 0xe3, 0x68, 0x66, 0x90, 0x75, 0xfc, 0x33, + 0xb9, 0xcb, 0x8f, 0x7f, 0x77, 0x3d, 0x08, 0xe3, 0xe4, 0x53, 0xe8, 0xbf, 0x08, 0x73, 0x46, 0x68, + 0x09, 0xca, 0xc4, 0xdf, 0x9e, 0x19, 0x62, 0xfc, 0x3e, 0x72, 0x30, 0xbf, 0x65, 0x7f, 0xfb, 0x96, + 0x13, 0x26, 0xf2, 0x6a, 0xd9, 0xdf, 0xc6, 0xb4, 0x34, 0x6a, 0x42, 0x55, 0xda, 0xaf, 0xa2, 0x99, + 0xe1, 0x22, 0x53, 0x11, 0x0b, 0x72, 0x4c, 0xde, 0xed, 0x78, 0x21, 0xd9, 0x22, 0x7e, 0x1c, 0x25, + 0x07, 0x2b, 0x89, 0x8d, 0x70, 0xc2, 0x17, 0x35, 0x61, 0x94, 0xeb, 0x9f, 0xd7, 0x82, 0x8e, 0x1f, + 0x47, 0x33, 0x55, 0xd6, 0xe4, 0x1c, 0x63, 0xc7, 0xad, 0xa4, 0xc4, 0xe2, 0xb4, 0x60, 0x3f, 0xaa, + 0x01, 0x23, 0x6c, 0x30, 0x45, 0x6f, 0xc0, 0x58, 0xcb, 0xdb, 0x26, 0x3e, 0x89, 0xa2, 0x7a, 0x18, + 0xdc, 0x21, 0x33, 0xc0, 0xbe, 0xe6, 0xf1, 0xbc, 0x83, 0x7f, 0x70, 0x87, 0x2c, 0x4e, 0xed, 0xed, + 0xce, 0x8d, 0x5d, 0xd5, 0x4b, 0x63, 0x93, 0x19, 0x7a, 0x0b, 0xc6, 0xa9, 0xb2, 0xeb, 0x25, 0xec, + 0x47, 0x8a, 0xb3, 0x47, 0x7b, 0xbb, 0x73, 0xe3, 0xd8, 0x28, 0x8e, 0x53, 0xec, 0xd0, 0x1a, 0x54, + 0x5b, 0xde, 0x3a, 0x69, 0xee, 0x34, 0x5b, 0x64, 0x66, 0x94, 0xf1, 0xce, 0x59, 0x9c, 0x57, 0x25, + 0x39, 0x3f, 0x60, 0xa8, 0xbf, 0x38, 0x61, 0x84, 0x6e, 0xc1, 0xa9, 0x98, 0x84, 0x5b, 0x9e, 0xef, + 0xd0, 0x45, 0x25, 0xb4, 0x5f, 0x66, 0x5d, 0x19, 0x63, 0xb3, 0xf6, 0xb4, 0xe8, 0xd8, 0x53, 0x6b, + 0x99, 0x54, 0xb8, 0x47, 0x69, 0x74, 0x03, 0x26, 0xd8, 0x7a, 0xaa, 0x77, 0x5a, 0xad, 0x7a, 0xd0, + 0xf2, 0x9a, 0x3b, 0x33, 0xe3, 0x8c, 0xe1, 0x13, 0xd2, 0x66, 0xb2, 0x6a, 0xa2, 0xe9, 0xc1, 0x30, + 0xf9, 0x87, 0xd3, 0xa5, 0x51, 0x0b, 0x26, 0x22, 0xd2, 0xec, 0x84, 0x5e, 0xbc, 0x43, 0xe7, 0x3e, + 0xb9, 0x1f, 0xcf, 0x4c, 0x14, 0x39, 0xe8, 0x36, 0xcc, 0x42, 0xdc, 0x60, 0x95, 0x02, 0xe2, 0x34, + 0x6b, 0x2a, 0x2a, 0xa2, 0xd8, 0xf5, 0xfc, 0x99, 0x49, 0x26, 0x81, 0xd4, 
0xfa, 0x6a, 0x50, 0x20, + 0xe6, 0x38, 0x66, 0x3f, 0xa0, 0x3f, 0x6e, 0x50, 0x29, 0x3d, 0xc5, 0x08, 0x13, 0xfb, 0x81, 0x44, + 0xe0, 0x84, 0x86, 0xaa, 0x06, 0x71, 0xbc, 0x33, 0x83, 0x18, 0xa9, 0x5a, 0x6a, 0x6b, 0x6b, 0x9f, + 0xc6, 0x14, 0x8e, 0x6e, 0xc1, 0x10, 0xf1, 0xb7, 0x57, 0xc2, 0x60, 0x6b, 0xe6, 0x44, 0x11, 0x19, + 0xb0, 0xcc, 0x89, 0xf9, 0xfe, 0x91, 0x1c, 0x61, 0x04, 0x18, 0x4b, 0x66, 0xe8, 0x3e, 0xcc, 0x64, + 0x8c, 0x12, 0x1f, 0x94, 0x69, 0x36, 0x28, 0x9f, 0x14, 0x65, 0x67, 0xd6, 0x7a, 0xd0, 0xed, 0x1f, + 0x80, 0xc3, 0x3d, 0xb9, 0xdb, 0x77, 0x60, 0x5c, 0x09, 0x2a, 0x36, 0xde, 0x68, 0x0e, 0x06, 0xa8, + 0x2c, 0x96, 0x07, 0xfa, 0x2a, 0xed, 0x54, 0x2a, 0xa2, 0x23, 0xcc, 0xe1, 0xac, 0x53, 0xbd, 0xf7, + 0xc8, 0xe2, 0x4e, 0x4c, 0xf8, 0xc1, 0xae, 0xac, 0x75, 0xaa, 0x44, 0xe0, 0x84, 0xc6, 0xfe, 0xbf, + 0x5c, 0x4d, 0x4a, 0xa4, 0x61, 0x81, 0x9d, 0xe0, 0x1c, 0x0c, 0x6f, 0x06, 0x51, 0x4c, 0xa9, 0x59, + 0x1d, 0x03, 0x89, 0x62, 0x74, 0x59, 0xc0, 0xb1, 0xa2, 0x40, 0x2f, 0xc3, 0x58, 0x53, 0xaf, 0x40, + 0x6c, 0x63, 0x27, 0x45, 0x11, 0xb3, 0x76, 0x6c, 0xd2, 0xa2, 0x97, 0x60, 0x98, 0x19, 0xc6, 0x9b, + 0x41, 0x4b, 0x1c, 0x21, 0xe5, 0xae, 0x3c, 0x5c, 0x17, 0xf0, 0x7d, 0xed, 0x37, 0x56, 0xd4, 0xf4, + 0x20, 0x4e, 0x9b, 0xb0, 0x5a, 0x17, 0x1b, 0x88, 0x3a, 0x88, 0x5f, 0x66, 0x50, 0x2c, 0xb0, 0xf6, + 0x3f, 0x2f, 0x69, 0xbd, 0x4c, 0x0f, 0x40, 0x04, 0xbd, 0x0e, 0x43, 0xf7, 0x1c, 0x2f, 0xf6, 0xfc, + 0x0d, 0xa1, 0x3d, 0x3c, 0x57, 0x70, 0x37, 0x61, 0xc5, 0x6f, 0xf3, 0xa2, 0x7c, 0xe7, 0x13, 0x7f, + 0xb0, 0x64, 0x48, 0x79, 0x87, 0x1d, 0xdf, 0xa7, 0xbc, 0x4b, 0xfd, 0xf3, 0xc6, 0xbc, 0x28, 0xe7, + 0x2d, 0xfe, 0x60, 0xc9, 0x10, 0xad, 0x03, 0xc8, 0xb9, 0x44, 0x5c, 0x61, 0x90, 0xfe, 0x58, 0x3f, + 0xec, 0xd7, 0x54, 0xe9, 0xc5, 0x71, 0xba, 0xd7, 0x26, 0xff, 0xb1, 0xc6, 0xd9, 0x8e, 0x99, 0x12, + 0xd6, 0xdd, 0x2c, 0xf4, 0x19, 0xba, 0xa4, 0x9d, 0x30, 0x26, 0xee, 0x42, 0x9c, 0xb6, 0xe9, 0x1f, + 0xac, 0x62, 0xaf, 0x79, 0x5b, 0x44, 0x5f, 0xfe, 0x82, 0x09, 0x4e, 0xf8, 0xd9, 0xdf, 0x2a, 0xc3, + 0x4c, 0xaf, 
0xe6, 0xd2, 0x29, 0x49, 0xee, 0x7b, 0xf1, 0x12, 0x55, 0x93, 0x2c, 0x73, 0x4a, 0x2e, + 0x0b, 0x38, 0x56, 0x14, 0x74, 0x6e, 0x44, 0xde, 0x86, 0x3c, 0x2c, 0x0d, 0x24, 0x73, 0xa3, 0xc1, + 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x48, 0x9c, 0x48, 0xdc, 0x87, 0x68, 0x73, 0x08, 0x33, 0x28, 0x16, + 0x58, 0xdd, 0x20, 0x52, 0xc9, 0x31, 0x88, 0x18, 0x5d, 0x34, 0xf0, 0x60, 0xbb, 0x08, 0xbd, 0x09, + 0xb0, 0xee, 0xf9, 0x5e, 0xb4, 0xc9, 0xb8, 0x0f, 0xf6, 0xcd, 0x5d, 0x29, 0x59, 0x2b, 0x8a, 0x0b, + 0xd6, 0x38, 0xa2, 0x17, 0x60, 0x44, 0x2d, 0xcf, 0xd5, 0xda, 0xcc, 0x90, 0x69, 0x43, 0x4f, 0x64, + 0x55, 0x0d, 0xeb, 0x74, 0xf6, 0x3b, 0xe9, 0xf9, 0x22, 0x56, 0x85, 0xd6, 0xbf, 0x56, 0xd1, 0xfe, + 0x2d, 0x1d, 0xdc, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0x84, 0x51, 0x59, 0x27, 0x2a, 0x20, 0xd1, 0x5e, + 0xa5, 0x1b, 0x96, 0x13, 0x13, 0xb1, 0x26, 0xcf, 0xf5, 0xb3, 0x68, 0xf4, 0xed, 0x8d, 0xae, 0x05, + 0xce, 0x09, 0x6d, 0x42, 0xb5, 0xe5, 0x44, 0xcc, 0xa4, 0x42, 0xc4, 0x5a, 0xec, 0x8f, 0x6d, 0x72, + 0xfc, 0x70, 0xa2, 0x58, 0xdb, 0x3d, 0x78, 0x2d, 0x09, 0x73, 0xba, 0xdb, 0x52, 0x65, 0x47, 0x5e, + 0xc2, 0xa9, 0xe6, 0x50, 0x8d, 0x68, 0x07, 0x73, 0x1c, 0x7a, 0x09, 0x46, 0x43, 0xc2, 0x66, 0xca, + 0x12, 0xd5, 0xe7, 0xd8, 0xd4, 0x1b, 0x48, 0x14, 0x3f, 0xac, 0xe1, 0xb0, 0x41, 0x99, 0xe8, 0xfd, + 0x83, 0x07, 0xe8, 0xfd, 0x4f, 0xc1, 0x10, 0xfb, 0xa1, 0x66, 0x85, 0x1a, 0xa1, 0x55, 0x0e, 0xc6, + 0x12, 0x9f, 0x9e, 0x44, 0xc3, 0x05, 0x27, 0xd1, 0xd3, 0x30, 0x5e, 0x73, 0xc8, 0x56, 0xe0, 0x2f, + 0xfb, 0x6e, 0x3b, 0xf0, 0xfc, 0x18, 0xcd, 0x40, 0x85, 0xed, 0x27, 0x7c, 0xbd, 0x57, 0x28, 0x07, + 0x5c, 0xa1, 0xba, 0xbb, 0xfd, 0x27, 0x25, 0x18, 0xab, 0x91, 0x16, 0x89, 0x09, 0x3f, 0xf7, 0x44, + 0x68, 0x05, 0xd0, 0x46, 0xe8, 0x34, 0x49, 0x9d, 0x84, 0x5e, 0xe0, 0x36, 0x48, 0x33, 0xf0, 0xd9, + 0xdd, 0x15, 0xdd, 0x20, 0x4f, 0xed, 0xed, 0xce, 0xa1, 0x4b, 0x5d, 0x58, 0x9c, 0x51, 0x02, 0xb9, + 0x30, 0xd6, 0x0e, 0x89, 0x61, 0x37, 0xb4, 0xf2, 0x55, 0x8d, 0xba, 0x5e, 0x84, 0x6b, 0xc3, 0x06, + 0x08, 0x9b, 0x4c, 0xd1, 0xa7, 0x60, 0x32, 0x08, 
0xdb, 0x9b, 0x8e, 0x5f, 0x23, 0x6d, 0xe2, 0xbb, + 0xf4, 0x08, 0x20, 0xac, 0x1d, 0xd3, 0x7b, 0xbb, 0x73, 0x93, 0x37, 0x52, 0x38, 0xdc, 0x45, 0x8d, + 0x5e, 0x87, 0xa9, 0x76, 0x18, 0xb4, 0x9d, 0x0d, 0x36, 0x65, 0x84, 0xb6, 0xc2, 0x65, 0xd3, 0xb9, + 0xbd, 0xdd, 0xb9, 0xa9, 0x7a, 0x1a, 0xb9, 0xbf, 0x3b, 0x77, 0x82, 0x75, 0x19, 0x85, 0x24, 0x48, + 0xdc, 0xcd, 0xc6, 0x7e, 0x17, 0x4e, 0xd6, 0x82, 0x7b, 0xfe, 0x3d, 0x27, 0x74, 0x17, 0xea, 0xab, + 0x9a, 0x71, 0xe2, 0x35, 0x79, 0xf8, 0xe5, 0x77, 0x82, 0x39, 0x3b, 0x9b, 0xc6, 0x83, 0x1f, 0x3b, + 0x56, 0xbc, 0x16, 0xe9, 0x61, 0x0e, 0xf9, 0xc7, 0x25, 0xa3, 0xce, 0x84, 0x5e, 0xdd, 0x5d, 0x58, + 0x3d, 0xef, 0x2e, 0x3e, 0x03, 0xc3, 0xeb, 0x1e, 0x69, 0xb9, 0x98, 0xac, 0x8b, 0xd1, 0xba, 0x50, + 0xe4, 0x72, 0x67, 0x85, 0x96, 0x91, 0xd6, 0x31, 0x7e, 0x88, 0x5e, 0x11, 0x6c, 0xb0, 0x62, 0x88, + 0x3a, 0x30, 0x29, 0xcf, 0x61, 0x12, 0x2b, 0x16, 0xfb, 0x73, 0xc5, 0x8e, 0x79, 0x66, 0x35, 0x6c, + 0x78, 0x71, 0x8a, 0x21, 0xee, 0xaa, 0x82, 0x9e, 0x9f, 0xb7, 0xe8, 0x56, 0x57, 0x61, 0x53, 0x9f, + 0x9d, 0x9f, 0x99, 0x29, 0x80, 0x41, 0xed, 0xdf, 0xb4, 0xe0, 0xa1, 0xae, 0xde, 0x12, 0x76, 0x92, + 0x23, 0x1b, 0xa3, 0xb4, 0xb1, 0xa2, 0x94, 0x6f, 0xac, 0xb0, 0x7f, 0xcb, 0x82, 0xe9, 0xe5, 0xad, + 0x76, 0xbc, 0x53, 0xf3, 0xcc, 0x3b, 0x97, 0x17, 0x61, 0x70, 0x8b, 0xb8, 0x5e, 0x67, 0x4b, 0x8c, + 0xeb, 0x9c, 0xdc, 0x18, 0xae, 0x31, 0xe8, 0xfe, 0xee, 0xdc, 0x58, 0x23, 0x0e, 0x42, 0x67, 0x83, + 0x70, 0x00, 0x16, 0xe4, 0x6c, 0x7b, 0xf5, 0xde, 0x23, 0x57, 0xbd, 0x2d, 0x4f, 0x5e, 0xe5, 0x1d, + 0x68, 0xe4, 0x9b, 0x97, 0x5d, 0x3b, 0xff, 0x6a, 0xc7, 0xf1, 0x63, 0x2f, 0xde, 0x11, 0xd7, 0x49, + 0x92, 0x09, 0x4e, 0xf8, 0xd9, 0x3f, 0xb2, 0x60, 0x42, 0x4a, 0x9f, 0x05, 0xd7, 0x0d, 0x49, 0x14, + 0xa1, 0x59, 0x28, 0x79, 0x6d, 0xd1, 0x4a, 0x10, 0xad, 0x2c, 0xad, 0xd6, 0x71, 0xc9, 0x6b, 0xa3, + 0xd7, 0xa1, 0xca, 0xef, 0x01, 0x93, 0xa9, 0xd7, 0xe7, 0xbd, 0x22, 0x6b, 0xcb, 0x9a, 0xe4, 0x81, + 0x13, 0x76, 0x52, 0x07, 0x67, 0xfb, 0x5a, 0xd9, 0xbc, 0x95, 0xba, 0x2c, 0xe0, 0x58, 
0x51, 0xa0, + 0xb3, 0x30, 0xec, 0x07, 0x2e, 0xbf, 0xaa, 0xe5, 0x52, 0x80, 0x4d, 0xe8, 0xeb, 0x02, 0x86, 0x15, + 0xd6, 0xfe, 0xa2, 0x05, 0xa3, 0xf2, 0x1b, 0x0b, 0x1e, 0x07, 0xe8, 0x12, 0x4c, 0x8e, 0x02, 0xc9, + 0x12, 0xa4, 0xea, 0x3c, 0xc3, 0x18, 0x5a, 0x7c, 0xb9, 0x1f, 0x2d, 0xde, 0xfe, 0xed, 0x12, 0x8c, + 0xcb, 0xe6, 0x34, 0x3a, 0x77, 0x22, 0x42, 0x95, 0x9c, 0xaa, 0xc3, 0x3b, 0x9f, 0xc8, 0x59, 0xfc, + 0x6c, 0xde, 0x49, 0xcf, 0x18, 0xb3, 0x44, 0x89, 0x5a, 0x90, 0x7c, 0x70, 0xc2, 0x12, 0x6d, 0xc3, + 0x94, 0x1f, 0xc4, 0x6c, 0xf3, 0x54, 0xf8, 0x62, 0xf7, 0x28, 0xe9, 0x7a, 0x1e, 0x16, 0xf5, 0x4c, + 0x5d, 0x4f, 0xf3, 0xc3, 0xdd, 0x55, 0xa0, 0x1b, 0xd2, 0x82, 0x55, 0x66, 0x75, 0x3d, 0x5d, 0xac, + 0xae, 0xde, 0x06, 0x2c, 0xfb, 0xf7, 0x2d, 0xa8, 0x4a, 0xb2, 0xe3, 0xb8, 0x50, 0xbb, 0x0d, 0x43, + 0x11, 0x1b, 0x22, 0xd9, 0x5d, 0xe7, 0x8a, 0x7d, 0x02, 0x1f, 0xd7, 0x44, 0x63, 0xe0, 0xff, 0x23, + 0x2c, 0xb9, 0x31, 0x53, 0xbe, 0xfa, 0x90, 0x0f, 0x9c, 0x29, 0x5f, 0xb5, 0xac, 0xf7, 0xbd, 0xd9, + 0x98, 0x61, 0x6b, 0xa0, 0x6a, 0x6f, 0x3b, 0x24, 0xeb, 0xde, 0xfd, 0xb4, 0xda, 0x5b, 0x67, 0x50, + 0x2c, 0xb0, 0x68, 0x1d, 0x46, 0x9b, 0xd2, 0xd8, 0x9d, 0x88, 0x90, 0x8f, 0x16, 0xbc, 0x59, 0x50, + 0x97, 0x54, 0xdc, 0x57, 0x6a, 0x49, 0xe3, 0x84, 0x0d, 0xbe, 0x54, 0x4e, 0x25, 0xf7, 0xf0, 0xe5, + 0x82, 0x66, 0xa1, 0x90, 0xc4, 0x49, 0x0d, 0x3d, 0xaf, 0xe0, 0xed, 0xaf, 0x5a, 0x30, 0xc8, 0xad, + 0xa3, 0xc5, 0x4c, 0xcc, 0xda, 0xf5, 0x5b, 0xd2, 0x9f, 0xb7, 0x28, 0x50, 0xdc, 0xc6, 0xa1, 0xdb, + 0x50, 0x65, 0x3f, 0x98, 0xa5, 0xa7, 0x5c, 0xc4, 0x71, 0x8c, 0xd7, 0xaf, 0x37, 0xf5, 0x96, 0x64, + 0x80, 0x13, 0x5e, 0xf6, 0x77, 0xca, 0x54, 0xf4, 0x25, 0xa4, 0x86, 0xe6, 0x60, 0x1d, 0x87, 0xe6, + 0x50, 0x3a, 0x7a, 0xcd, 0xe1, 0x5d, 0x98, 0x68, 0x6a, 0xd7, 0x7f, 0xc9, 0x88, 0x5f, 0x2c, 0x38, + 0xad, 0xb4, 0x3b, 0x43, 0x6e, 0x0d, 0x5c, 0x32, 0xd9, 0xe1, 0x34, 0x7f, 0x44, 0x60, 0x94, 0xcf, + 0x07, 0x51, 0x5f, 0x85, 0xd5, 0x77, 0xbe, 0xc8, 0x0c, 0xd3, 0x2b, 0x63, 0xb3, 0xb8, 0xa1, 0x31, + 0xc2, 0x06, 0x5b, 0xfb, 
0xd7, 0x07, 0x60, 0x60, 0x79, 0x9b, 0xf8, 0xf1, 0x31, 0x88, 0xba, 0x2d, + 0x18, 0xf7, 0xfc, 0xed, 0xa0, 0xb5, 0x4d, 0x5c, 0x8e, 0x3f, 0xdc, 0xf6, 0x7e, 0x4a, 0x54, 0x32, + 0xbe, 0x6a, 0x30, 0xc3, 0x29, 0xe6, 0x47, 0x61, 0x87, 0x78, 0x15, 0x06, 0xf9, 0xcc, 0x10, 0x46, + 0x88, 0x9c, 0xdb, 0x02, 0xd6, 0xb1, 0x62, 0x05, 0x25, 0xd6, 0x12, 0x7e, 0x51, 0x21, 0x18, 0xa1, + 0x77, 0x60, 0x7c, 0xdd, 0x0b, 0xa3, 0x78, 0xcd, 0xdb, 0xa2, 0xe7, 0xc7, 0xad, 0xf6, 0x21, 0x2c, + 0x10, 0xaa, 0x47, 0x56, 0x0c, 0x4e, 0x38, 0xc5, 0x19, 0x6d, 0xc0, 0x18, 0x3d, 0x00, 0x27, 0x55, + 0x0d, 0xf5, 0x5d, 0x95, 0x32, 0x40, 0x5e, 0xd5, 0x19, 0x61, 0x93, 0x2f, 0x15, 0x49, 0x4d, 0x76, + 0x60, 0x1e, 0x66, 0xda, 0x8d, 0x12, 0x49, 0xfc, 0xa4, 0xcc, 0x71, 0x54, 0xb2, 0x31, 0x3f, 0x9c, + 0xaa, 0x29, 0xd9, 0x12, 0x6f, 0x1b, 0xfb, 0xeb, 0x74, 0x2f, 0xa6, 0x7d, 0x78, 0x0c, 0xdb, 0xd7, + 0x65, 0x73, 0xfb, 0x7a, 0xbc, 0xc0, 0xc8, 0xf6, 0xd8, 0xba, 0xde, 0x86, 0x11, 0x6d, 0xe0, 0xd1, + 0x79, 0xa8, 0x36, 0xa5, 0xab, 0x88, 0x90, 0xe2, 0x4a, 0x95, 0x52, 0x3e, 0x24, 0x38, 0xa1, 0xa1, + 0xfd, 0x42, 0x55, 0xd0, 0xb4, 0x63, 0x19, 0x55, 0x50, 0x31, 0xc3, 0xd8, 0xcf, 0x01, 0x2c, 0xdf, + 0x27, 0xcd, 0x05, 0x7e, 0x80, 0xd4, 0x6e, 0x0f, 0xad, 0xde, 0xb7, 0x87, 0xf6, 0xd7, 0x2c, 0x18, + 0x5f, 0x59, 0x32, 0x0e, 0x0c, 0xf3, 0x00, 0x5c, 0x37, 0xbe, 0x7d, 0xfb, 0xba, 0xb4, 0x8e, 0x73, + 0x13, 0xa6, 0x82, 0x62, 0x8d, 0x02, 0x3d, 0x0c, 0xe5, 0x56, 0xc7, 0x17, 0x2a, 0xeb, 0xd0, 0xde, + 0xee, 0x5c, 0xf9, 0x6a, 0xc7, 0xc7, 0x14, 0xa6, 0x79, 0x70, 0x95, 0x0b, 0x7b, 0x70, 0xe5, 0xbb, + 0x3f, 0x7f, 0xb9, 0x0c, 0x93, 0x2b, 0x2d, 0x72, 0xdf, 0x68, 0xf5, 0x93, 0x30, 0xe8, 0x86, 0xde, + 0x36, 0x09, 0xd3, 0x8a, 0x40, 0x8d, 0x41, 0xb1, 0xc0, 0x16, 0x76, 0x2a, 0x7b, 0xab, 0x7b, 0x23, + 0x3f, 0x3a, 0x87, 0xba, 0xdc, 0x6f, 0x46, 0xeb, 0x30, 0xc4, 0x6f, 0x9b, 0xa3, 0x99, 0x01, 0x36, + 0x15, 0x5f, 0x3e, 0xb8, 0x31, 0xe9, 0xfe, 0x99, 0x17, 0xd6, 0x1b, 0xee, 0xce, 0xa3, 0x64, 0x99, + 0x80, 0x62, 0xc9, 0x7c, 0xf6, 0x13, 0x30, 0xaa, 0x53, 0xf6, 
0xe5, 0xd7, 0xf3, 0x57, 0x2d, 0x38, + 0xb1, 0xd2, 0x0a, 0x9a, 0x77, 0x53, 0x5e, 0x7f, 0x2f, 0xc0, 0x08, 0x5d, 0x4c, 0x91, 0xe1, 0x12, + 0x6b, 0xb8, 0x0b, 0x0b, 0x14, 0xd6, 0xe9, 0xb4, 0x62, 0x37, 0x6f, 0xae, 0xd6, 0xb2, 0xbc, 0x8c, + 0x05, 0x0a, 0xeb, 0x74, 0xf6, 0x1f, 0x5a, 0xf0, 0xe8, 0xa5, 0xa5, 0xe5, 0x3a, 0x09, 0x23, 0x2f, + 0x8a, 0x89, 0x1f, 0x77, 0x39, 0x3a, 0x53, 0x9d, 0xd1, 0xd5, 0x9a, 0x92, 0xe8, 0x8c, 0x35, 0xd6, + 0x0a, 0x81, 0xfd, 0xa0, 0x78, 0xfb, 0x7f, 0xd5, 0x82, 0x13, 0x97, 0xbc, 0x18, 0x93, 0x76, 0x90, + 0x76, 0x34, 0x0e, 0x49, 0x3b, 0x88, 0xbc, 0x38, 0x08, 0x77, 0xd2, 0x8e, 0xc6, 0x58, 0x61, 0xb0, + 0x46, 0xc5, 0x6b, 0xde, 0xf6, 0x22, 0xda, 0xd2, 0x92, 0x79, 0xd4, 0xc5, 0x02, 0x8e, 0x15, 0x05, + 0xfd, 0x30, 0xd7, 0x0b, 0x99, 0xca, 0xb0, 0x23, 0x56, 0xb0, 0xfa, 0xb0, 0x9a, 0x44, 0xe0, 0x84, + 0xc6, 0xfe, 0xbb, 0x16, 0x9c, 0xbc, 0xd4, 0xea, 0x44, 0x31, 0x09, 0xd7, 0x23, 0xa3, 0xb1, 0xcf, + 0x41, 0x95, 0x48, 0xe5, 0x5e, 0xb4, 0x55, 0x6d, 0x1a, 0x4a, 0xeb, 0xe7, 0x5e, 0xce, 0x8a, 0xae, + 0x80, 0x33, 0x6d, 0x7f, 0xae, 0x9f, 0xbf, 0x5b, 0x82, 0xb1, 0xcb, 0x6b, 0x6b, 0xf5, 0x4b, 0x24, + 0x16, 0x52, 0x32, 0xdf, 0xe4, 0x85, 0xb5, 0x13, 0xf9, 0x41, 0xca, 0x4f, 0x27, 0xf6, 0x5a, 0xf3, + 0x3c, 0x12, 0x65, 0x7e, 0xd5, 0x8f, 0x6f, 0x84, 0x8d, 0x38, 0xf4, 0xfc, 0x8d, 0xcc, 0x33, 0xbc, + 0x94, 0xe5, 0xe5, 0x5e, 0xb2, 0x1c, 0x3d, 0x07, 0x83, 0x2c, 0x14, 0x46, 0x2a, 0x1f, 0x1f, 0x56, + 0x7a, 0x02, 0x83, 0xee, 0xef, 0xce, 0x55, 0x6f, 0xe2, 0x55, 0xfe, 0x07, 0x0b, 0x52, 0xf4, 0x16, + 0x8c, 0x6c, 0xc6, 0x71, 0xfb, 0x32, 0x71, 0x5c, 0x12, 0x4a, 0x39, 0x71, 0xf6, 0x60, 0x39, 0x41, + 0xbb, 0x83, 0x17, 0x48, 0x96, 0x56, 0x02, 0x8b, 0xb0, 0xce, 0xd1, 0x6e, 0x00, 0x24, 0xb8, 0x07, + 0x74, 0x06, 0xb1, 0x7f, 0xb9, 0x04, 0x43, 0x97, 0x1d, 0xdf, 0x6d, 0x91, 0x10, 0xad, 0x40, 0x85, + 0xdc, 0x27, 0x4d, 0xb1, 0x91, 0xe7, 0x34, 0x3d, 0xd9, 0xec, 0xb8, 0xd5, 0x8e, 0xfe, 0xc7, 0xac, + 0x3c, 0xc2, 0x30, 0x44, 0xdb, 0x7d, 0x49, 0xf9, 0xa0, 0x3f, 0x93, 0xdf, 0x0b, 0x6a, 0x52, 0xf0, + 
0x9d, 0x52, 0x80, 0xb0, 0x64, 0xc4, 0x2c, 0x50, 0xcd, 0x76, 0x83, 0x8a, 0xb7, 0xb8, 0xd8, 0xc9, + 0x6e, 0x6d, 0xa9, 0xce, 0xc9, 0x05, 0x5f, 0x6e, 0x81, 0x92, 0x40, 0x9c, 0xb0, 0xb3, 0xd7, 0xa0, + 0x4a, 0x07, 0x7f, 0xa1, 0xe5, 0x39, 0x07, 0x9b, 0xc1, 0x9e, 0x81, 0xaa, 0x34, 0x44, 0x45, 0xc2, + 0xa1, 0x9d, 0x71, 0x95, 0x76, 0xaa, 0x08, 0x27, 0x78, 0xfb, 0x25, 0x98, 0x66, 0x77, 0xc8, 0x4e, + 0xbc, 0x69, 0xac, 0xc5, 0xdc, 0x49, 0x6f, 0x7f, 0xa3, 0x02, 0x53, 0xab, 0x8d, 0xa5, 0x86, 0x69, + 0xef, 0x7c, 0x09, 0x46, 0xf9, 0xb6, 0x4f, 0xa7, 0xb2, 0xd3, 0x12, 0xe5, 0xd5, 0xbd, 0xc7, 0x9a, + 0x86, 0xc3, 0x06, 0x25, 0x7a, 0x14, 0xca, 0xde, 0xbb, 0x7e, 0xda, 0x13, 0x71, 0xf5, 0xd5, 0xeb, + 0x98, 0xc2, 0x29, 0x9a, 0x6a, 0x10, 0x5c, 0x74, 0x2a, 0xb4, 0xd2, 0x22, 0x5e, 0x81, 0x71, 0x2f, + 0x6a, 0x46, 0xde, 0xaa, 0x4f, 0xe5, 0x8a, 0xd3, 0x94, 0x8b, 0x22, 0x51, 0xf9, 0x69, 0x53, 0x15, + 0x16, 0xa7, 0xa8, 0x35, 0x39, 0x3e, 0x50, 0x58, 0x0b, 0xc9, 0x75, 0x71, 0xa7, 0x0a, 0x56, 0x9b, + 0x7d, 0x5d, 0xc4, 0xfc, 0x9a, 0x84, 0x82, 0xc5, 0x3f, 0x38, 0xc2, 0x12, 0x87, 0x2e, 0xc1, 0x54, + 0x73, 0xd3, 0x69, 0x2f, 0x74, 0xe2, 0xcd, 0x9a, 0x17, 0x35, 0x83, 0x6d, 0x12, 0xee, 0x30, 0x05, + 0x78, 0x38, 0xb1, 0x69, 0x29, 0xc4, 0xd2, 0xe5, 0x85, 0x3a, 0xa5, 0xc4, 0xdd, 0x65, 0x4c, 0x85, + 0x04, 0x8e, 0x40, 0x21, 0x59, 0x80, 0x09, 0x59, 0x6b, 0x83, 0x44, 0x6c, 0x8b, 0x18, 0x61, 0xed, + 0x54, 0xc1, 0x45, 0x02, 0xac, 0x5a, 0x99, 0xa6, 0xb7, 0xdf, 0x81, 0xaa, 0xf2, 0xc3, 0x93, 0xee, + 0xa7, 0x56, 0x0f, 0xf7, 0xd3, 0x7c, 0xe1, 0x2e, 0x2d, 0xf3, 0xe5, 0x4c, 0xcb, 0xfc, 0x3f, 0xb1, + 0x20, 0x71, 0x24, 0x42, 0x18, 0xaa, 0xed, 0x80, 0xdd, 0xe2, 0x85, 0xf2, 0xba, 0xfc, 0x89, 0x9c, + 0x35, 0xcf, 0x65, 0x0e, 0xef, 0x90, 0xba, 0x2c, 0x8b, 0x13, 0x36, 0xe8, 0x2a, 0x0c, 0xb5, 0x43, + 0xd2, 0x88, 0x59, 0xec, 0x48, 0x1f, 0x1c, 0xf9, 0x44, 0xe0, 0x25, 0xb1, 0x64, 0x61, 0xff, 0x4b, + 0x0b, 0x80, 0x9b, 0xc1, 0x1d, 0x7f, 0x83, 0x1c, 0xc3, 0xc1, 0xfa, 0x3a, 0x54, 0xa2, 0x36, 0x69, + 0x16, 0xbb, 0x87, 0x4d, 0x5a, 0xd6, 
0x68, 0x93, 0x66, 0x32, 0x1c, 0xf4, 0x1f, 0x66, 0x7c, 0xec, + 0x6f, 0x03, 0x8c, 0x27, 0x64, 0xf4, 0x70, 0x83, 0x9e, 0x35, 0x82, 0x26, 0x1e, 0x4e, 0x05, 0x4d, + 0x54, 0x19, 0xb5, 0x16, 0x27, 0x11, 0x43, 0x79, 0xcb, 0xb9, 0x2f, 0xce, 0x52, 0x2f, 0x14, 0x6d, + 0x10, 0xad, 0x69, 0xfe, 0x9a, 0x73, 0x9f, 0xab, 0xae, 0xcf, 0xc8, 0x89, 0x74, 0xcd, 0xb9, 0xbf, + 0xcf, 0x6f, 0x5b, 0x99, 0x74, 0xa2, 0x87, 0xb7, 0xcf, 0xfe, 0x59, 0xf2, 0x9f, 0x6d, 0x43, 0xb4, + 0x3a, 0x56, 0xab, 0xe7, 0x0b, 0x53, 0x70, 0x9f, 0xb5, 0x7a, 0x7e, 0xba, 0x56, 0xcf, 0x2f, 0x50, + 0xab, 0xc7, 0xbc, 0x8b, 0x87, 0xc4, 0xfd, 0x0c, 0x73, 0xcd, 0x1c, 0xb9, 0xf8, 0xf1, 0xbe, 0xaa, + 0x16, 0x17, 0x3d, 0xbc, 0xfa, 0xf3, 0x52, 0x5f, 0x17, 0xd0, 0xdc, 0x26, 0xc8, 0xaa, 0xd1, 0xdf, + 0xb3, 0x60, 0x5c, 0xfc, 0xc6, 0xe4, 0xdd, 0x0e, 0x89, 0x62, 0xa1, 0x17, 0x7c, 0xea, 0x30, 0xad, + 0x11, 0x2c, 0x78, 0xa3, 0x3e, 0x26, 0xc5, 0xaf, 0x89, 0xcc, 0x6d, 0x5b, 0xaa, 0x3d, 0xe8, 0xdb, + 0x16, 0x4c, 0x6f, 0x39, 0xf7, 0x79, 0x8d, 0x1c, 0x86, 0x9d, 0xd8, 0x0b, 0x84, 0xfb, 0xe9, 0x4a, + 0xbf, 0xf3, 0xa4, 0x8b, 0x11, 0x6f, 0xae, 0xf4, 0x2c, 0x9b, 0xce, 0x22, 0xc9, 0x6d, 0x74, 0x66, + 0x0b, 0x67, 0xd7, 0x61, 0x58, 0x4e, 0xcc, 0x8c, 0x93, 0x52, 0x4d, 0x57, 0x7f, 0xfa, 0xbe, 0x3c, + 0xd3, 0x4e, 0x56, 0xac, 0x1e, 0x31, 0x15, 0x8f, 0xb4, 0x9e, 0x77, 0x60, 0x54, 0x9f, 0x77, 0x47, + 0x5a, 0xd7, 0xbb, 0x70, 0x22, 0x63, 0x56, 0x1d, 0x69, 0x95, 0xf7, 0xe0, 0xe1, 0x9e, 0xf3, 0xe3, + 0x28, 0x2b, 0xb6, 0x7f, 0xd7, 0xd2, 0x45, 0xe7, 0x31, 0xd8, 0xad, 0xae, 0x99, 0x76, 0xab, 0xb3, + 0x45, 0xd7, 0x50, 0x0f, 0xe3, 0xd5, 0xba, 0xde, 0x7c, 0xba, 0x25, 0xa0, 0x35, 0x18, 0x6c, 0x51, + 0x88, 0xbc, 0x36, 0x3c, 0xd7, 0xcf, 0x2a, 0x4d, 0x34, 0x30, 0x06, 0x8f, 0xb0, 0xe0, 0x65, 0x7f, + 0xdb, 0x82, 0xca, 0x5f, 0x62, 0x48, 0x57, 0x17, 0x6b, 0x91, 0x96, 0x60, 0x1e, 0x3b, 0xf7, 0x96, + 0xef, 0xc7, 0xc4, 0x8f, 0x98, 0x1a, 0x9f, 0xd9, 0x45, 0xff, 0xa7, 0x04, 0x23, 0xb4, 0x2a, 0xe9, + 0x25, 0xf3, 0x32, 0x8c, 0xb5, 0x9c, 0x3b, 0xa4, 0x25, 0x6d, 0xee, 0xe9, 
0x43, 0xef, 0x55, 0x1d, + 0x89, 0x4d, 0x5a, 0x5a, 0x78, 0x5d, 0xbf, 0x92, 0x10, 0x4a, 0x92, 0x2a, 0x6c, 0xdc, 0x57, 0x60, + 0x93, 0x96, 0x9e, 0xba, 0xee, 0x39, 0x71, 0x73, 0x53, 0x1c, 0x88, 0x55, 0x73, 0x6f, 0x53, 0x20, + 0xe6, 0x38, 0xaa, 0xec, 0xc9, 0x19, 0x7b, 0x8b, 0x84, 0x4c, 0xd9, 0xe3, 0x4a, 0xb5, 0x52, 0xf6, + 0xb0, 0x89, 0xc6, 0x69, 0x7a, 0xf4, 0x09, 0x18, 0xa7, 0x9d, 0x13, 0x74, 0x62, 0xe9, 0x03, 0x34, + 0xc0, 0x7c, 0x80, 0x98, 0x0b, 0xf9, 0x9a, 0x81, 0xc1, 0x29, 0x4a, 0x54, 0x87, 0x69, 0xcf, 0x6f, + 0xb6, 0x3a, 0x2e, 0xb9, 0xe9, 0x7b, 0xbe, 0x17, 0x7b, 0x4e, 0xcb, 0x7b, 0x8f, 0xb8, 0x42, 0xed, + 0x56, 0xee, 0x5a, 0xab, 0x19, 0x34, 0x38, 0xb3, 0xa4, 0xfd, 0x16, 0x9c, 0xb8, 0x1a, 0x38, 0xee, + 0xa2, 0xd3, 0x72, 0xfc, 0x26, 0x09, 0x57, 0xfd, 0x8d, 0x5c, 0x9f, 0x02, 0xfd, 0xde, 0xbf, 0x94, + 0x77, 0xef, 0x6f, 0x87, 0x80, 0xf4, 0x0a, 0x84, 0x3f, 0xdc, 0x1b, 0x30, 0xe4, 0xf1, 0xaa, 0xc4, + 0x42, 0xb8, 0x90, 0xa7, 0x93, 0x77, 0xb5, 0x51, 0xf3, 0xef, 0xe2, 0x00, 0x2c, 0x59, 0xd2, 0x13, + 0x5c, 0x96, 0x12, 0x9f, 0x7f, 0xf4, 0xb6, 0x5f, 0x80, 0x29, 0x56, 0xb2, 0xcf, 0x83, 0xdf, 0x5f, + 0xb3, 0x60, 0xe2, 0x7a, 0x2a, 0xf8, 0xf9, 0x49, 0x18, 0x8c, 0x48, 0x98, 0x61, 0x59, 0x6d, 0x30, + 0x28, 0x16, 0xd8, 0x07, 0x6e, 0xad, 0xf9, 0xb5, 0x12, 0x54, 0x99, 0x43, 0x76, 0x9b, 0x1e, 0xe2, + 0x8e, 0x5e, 0x5f, 0xbe, 0x66, 0xe8, 0xcb, 0x39, 0x16, 0x03, 0xd5, 0xb0, 0x5e, 0xea, 0x32, 0xba, + 0xa9, 0x82, 0x82, 0x0b, 0x19, 0x0b, 0x12, 0x86, 0x3c, 0x70, 0x74, 0xdc, 0x8c, 0x21, 0x96, 0x01, + 0xc3, 0xec, 0x02, 0x5f, 0xd1, 0x7e, 0xe0, 0x2e, 0xf0, 0x55, 0xcb, 0x7a, 0x48, 0xc9, 0xba, 0xd6, + 0x78, 0xb6, 0x8f, 0xfc, 0x1c, 0x73, 0xb3, 0x65, 0x6b, 0x58, 0xc5, 0xd6, 0xcf, 0x09, 0xb7, 0x59, + 0x01, 0xdd, 0x67, 0x02, 0x4f, 0xfc, 0xe3, 0xa9, 0x13, 0x92, 0x22, 0xf6, 0x65, 0x98, 0x48, 0x75, + 0x1d, 0x7a, 0x01, 0x06, 0xda, 0x9b, 0x4e, 0x44, 0x52, 0x0e, 0x4f, 0x03, 0x75, 0x0a, 0xdc, 0xdf, + 0x9d, 0x1b, 0x57, 0x05, 0x18, 0x04, 0x73, 0x6a, 0xfb, 0x73, 0x25, 0xa8, 0x5c, 0x0f, 0xdc, 0xe3, + 0x98, 0x6a, 
0x97, 0x8d, 0xa9, 0xf6, 0x64, 0x7e, 0xae, 0x96, 0x9e, 0xb3, 0xac, 0x9e, 0x9a, 0x65, + 0x67, 0x0b, 0xf0, 0x3a, 0x78, 0x82, 0x6d, 0xc1, 0x08, 0xcb, 0x05, 0x23, 0x9c, 0xb2, 0x9e, 0x33, + 0x8e, 0x78, 0x73, 0xa9, 0x23, 0xde, 0x84, 0x46, 0xaa, 0x1d, 0xf4, 0x9e, 0x82, 0x21, 0xe1, 0x04, + 0x94, 0x76, 0x32, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x7f, 0x51, 0x06, 0x23, 0xf7, 0x0c, 0xfa, 0x7d, + 0x0b, 0xe6, 0x43, 0x1e, 0xb0, 0xe5, 0xd6, 0x3a, 0xa1, 0xe7, 0x6f, 0x34, 0x9a, 0x9b, 0xc4, 0xed, + 0xb4, 0x3c, 0x7f, 0x63, 0x75, 0xc3, 0x0f, 0x14, 0x78, 0xf9, 0x3e, 0x69, 0x76, 0x98, 0xcd, 0xbd, + 0x70, 0xca, 0x1b, 0x75, 0x01, 0x7e, 0x71, 0x6f, 0x77, 0x6e, 0x1e, 0xf7, 0x55, 0x0b, 0xee, 0xb3, + 0x55, 0xe8, 0x87, 0x16, 0x9c, 0xe7, 0xd9, 0x57, 0x8a, 0x7f, 0x49, 0xa1, 0xa3, 0x71, 0x5d, 0x32, + 0x4d, 0xd8, 0xad, 0x91, 0x70, 0x6b, 0xf1, 0x45, 0xd1, 0xc9, 0xe7, 0xeb, 0xfd, 0xd5, 0x8a, 0xfb, + 0x6d, 0xa6, 0xfd, 0xaf, 0xcb, 0x30, 0x46, 0xfb, 0x33, 0x49, 0x9f, 0xf0, 0x82, 0x31, 0x4d, 0x1e, + 0x4b, 0x4d, 0x93, 0x29, 0x83, 0xf8, 0xc1, 0x64, 0x4e, 0x88, 0x60, 0xaa, 0xe5, 0x44, 0xf1, 0x65, 0xe2, 0x84, 0xf1, 0x1d, 0xe2, 0xb0, 0x7b, 0xe6, 0xb4, 0x0f, 0x4b, 0x81, 0xab, 0x6b, 0x65, 0x84, - 0xbb, 0x9a, 0x26, 0x86, 0xbb, 0xe9, 0xa3, 0x6d, 0x40, 0xec, 0x4e, 0x3b, 0x74, 0xfc, 0x88, 0x7f, - 0x8b, 0x27, 0x6c, 0xf4, 0xfd, 0xb5, 0x3a, 0x2b, 0x5a, 0x45, 0x57, 0xbb, 0xa8, 0xe1, 0x8c, 0x16, - 0x34, 0xaf, 0x85, 0x81, 0xa2, 0x5e, 0x0b, 0x83, 0x39, 0x1e, 0xfe, 0xbf, 0x62, 0xc1, 0x09, 0x3a, - 0x2d, 0xa6, 0x37, 0x78, 0x84, 0x02, 0x98, 0xa0, 0xcb, 0xae, 0x45, 0x62, 0x59, 0x26, 0xf6, 0x57, - 0x8e, 0x88, 0x6f, 0xd2, 0x49, 0xe4, 0xc8, 0x2b, 0x26, 0x31, 0x9c, 0xa6, 0x6e, 0x7f, 0xcd, 0x02, - 0xe6, 0x3d, 0x79, 0x0c, 0x87, 0xd9, 0x25, 0xf3, 0x30, 0xb3, 0xf3, 0x39, 0x46, 0x8f, 0x73, 0xec, - 0x79, 0x98, 0xa4, 0xd0, 0x7a, 0x18, 0xdc, 0xdf, 0x91, 0x12, 0x7f, 0xbe, 0x74, 0xf5, 0x2b, 0x25, - 0xbe, 0x6d, 0x54, 0xf4, 0x29, 0xfa, 0xbc, 0x05, 0xc3, 0x4d, 0xa7, 0xed, 0x34, 0x79, 0xf6, 0xae, - 0x02, 0x66, 0x22, 0xa3, 0xfe, 0xfc, 0x92, 0xa8, 0xcb, 
0x4d, 0x1c, 0x1f, 0x95, 0x9f, 0x2e, 0x8b, - 0x73, 0xcd, 0x1a, 0xaa, 0xf1, 0xd9, 0xbb, 0x30, 0x66, 0x10, 0x3b, 0x52, 0x7d, 0xf8, 0xf3, 0x16, - 0x67, 0xfa, 0x4a, 0x67, 0xb9, 0x07, 0x53, 0xbe, 0xf6, 0x9f, 0xb2, 0x33, 0x29, 0x50, 0xcf, 0x17, - 0x67, 0xeb, 0x8c, 0x0b, 0x6a, 0x9e, 0xa2, 0x29, 0x82, 0xb8, 0xbb, 0x0d, 0xfb, 0x37, 0x2c, 0x78, - 0x48, 0x47, 0xd4, 0xc2, 0x85, 0xf3, 0x0c, 0xd8, 0x35, 0x18, 0x0e, 0xda, 0x24, 0x74, 0x12, 0xfd, - 0xec, 0xac, 0x1c, 0xff, 0x1b, 0xa2, 0x7c, 0x7f, 0x77, 0x6e, 0x5a, 0xa7, 0x2e, 0xcb, 0xb1, 0xaa, - 0x89, 0x6c, 0x18, 0x64, 0xe3, 0x12, 0x89, 0x40, 0x6f, 0x96, 0xcd, 0x8a, 0x5d, 0x90, 0x45, 0x58, - 0x40, 0xec, 0xbf, 0x69, 0xf1, 0xe5, 0xa6, 0x77, 0x1d, 0xfd, 0x02, 0x4c, 0x6e, 0x51, 0x55, 0x6e, - 0xf9, 0x7e, 0x3b, 0xe4, 0xe6, 0x77, 0x39, 0x62, 0x2f, 0x14, 0x1f, 0x31, 0xed, 0x73, 0x17, 0x67, - 0x44, 0xef, 0x27, 0xaf, 0xa5, 0xc8, 0xe2, 0xae, 0x86, 0xec, 0x7f, 0x50, 0xe2, 0x7b, 0x96, 0xc9, - 0x70, 0x4f, 0xc1, 0x50, 0x3b, 0x70, 0x97, 0x56, 0x6b, 0x58, 0x8c, 0x95, 0x62, 0x3a, 0x75, 0x5e, - 0x8c, 0x25, 0x1c, 0x5d, 0x04, 0x20, 0xf7, 0x63, 0x12, 0xfa, 0x4e, 0x4b, 0x5d, 0xe9, 0x2b, 0x51, - 0x69, 0x59, 0x41, 0xb0, 0x86, 0x45, 0xeb, 0xb4, 0xc3, 0x60, 0xdb, 0x73, 0x59, 0x9c, 0x4b, 0xd9, - 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0x51, 0x05, 0xba, 0xe3, 0x47, 0xfc, 0x18, 0x73, 0xee, 0x88, - 0x4c, 0x4a, 0xc3, 0x89, 0x02, 0x7d, 0x53, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x81, 0xc1, 0xd8, 0x61, - 0x17, 0xd5, 0x03, 0x45, 0xbc, 0x7e, 0xd6, 0x28, 0xae, 0x9e, 0xba, 0x8a, 0x56, 0xc5, 0x82, 0x84, - 0xfd, 0x9f, 0xaa, 0x00, 0x89, 0xd4, 0x85, 0x3e, 0xd7, 0xbd, 0xe1, 0x3f, 0x56, 0x54, 0x64, 0x7b, - 0x70, 0xbb, 0x1d, 0x7d, 0xc9, 0x82, 0x11, 0xa7, 0xd5, 0x0a, 0x9a, 0x4e, 0xcc, 0x86, 0xa7, 0x54, - 0x94, 0xf5, 0x88, 0x9e, 0x2c, 0x24, 0x75, 0x79, 0x67, 0x9e, 0x93, 0x97, 0xc7, 0x1a, 0x24, 0xb7, - 0x3f, 0x7a, 0x17, 0xd0, 0x47, 0xa5, 0xd4, 0xce, 0x67, 0x78, 0x36, 0x2d, 0xb5, 0x57, 0x19, 0xc3, - 0xd5, 0x04, 0x76, 0xf4, 0x96, 0x91, 0x79, 0xa8, 0x52, 0x24, 0x58, 0xd9, 0x90, 0x43, 0xf2, 
0x92, - 0x0e, 0xa1, 0xd7, 0x75, 0xf7, 0xf8, 0x81, 0x22, 0xd9, 0x00, 0x34, 0x71, 0x38, 0xc7, 0x35, 0x3e, - 0x86, 0x09, 0xd7, 0x3c, 0x79, 0x85, 0x8b, 0xdf, 0x85, 0xfc, 0x16, 0x52, 0x47, 0x76, 0x72, 0xd6, - 0xa6, 0x00, 0x38, 0xdd, 0x04, 0x7a, 0x9d, 0x07, 0x2f, 0xac, 0xfa, 0xeb, 0x81, 0x70, 0xf3, 0x3b, - 0x57, 0x60, 0xce, 0x77, 0xa2, 0x98, 0x6c, 0xd1, 0x3a, 0xc9, 0xe1, 0x7a, 0x5d, 0x50, 0xc1, 0x8a, - 0x1e, 0x5a, 0x83, 0x41, 0x16, 0x9b, 0x16, 0xcd, 0x0c, 0x17, 0x31, 0x09, 0x9a, 0x21, 0xd9, 0xc9, - 0xfe, 0x61, 0x7f, 0x23, 0x2c, 0x68, 0xa1, 0xcb, 0x32, 0x29, 0x43, 0xb4, 0xea, 0xdf, 0x8c, 0x08, - 0x4b, 0xca, 0x50, 0x5d, 0xfc, 0x48, 0x92, 0x65, 0x81, 0x97, 0x67, 0xa6, 0x6b, 0x34, 0x6a, 0x52, - 0xc1, 0x46, 0xfc, 0x97, 0x59, 0x20, 0x67, 0xa0, 0x48, 0x47, 0xcd, 0x9c, 0x91, 0xc9, 0x60, 0xdf, - 0x32, 0x89, 0xe1, 0x34, 0xf5, 0x63, 0x3d, 0x52, 0x67, 0x7d, 0x98, 0x4c, 0x6f, 0xca, 0x23, 0x3d, - 0xc2, 0x7f, 0x5c, 0x81, 0x71, 0x73, 0x71, 0xa0, 0xf3, 0x50, 0x15, 0x44, 0x54, 0x8a, 0x37, 0xb5, - 0x07, 0xae, 0x49, 0x00, 0x4e, 0x70, 0x58, 0xb2, 0x3b, 0x56, 0x5d, 0x73, 0xf0, 0x4a, 0x92, 0xdd, - 0x29, 0x08, 0xd6, 0xb0, 0xa8, 0x24, 0x7c, 0x27, 0x08, 0x62, 0x75, 0x12, 0xa8, 0x75, 0xb3, 0xc8, - 0x4a, 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x2e, 0x9d, 0xcc, 0x96, 0x69, 0xde, 0x54, 0x27, 0xc0, 0x15, - 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x05, 0x11, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x1c, 0xe6, 0x1a, - 0x3c, 0x5e, 0x53, 0xc2, 0xd1, 0xa7, 0xe1, 0x21, 0x15, 0x5e, 0x89, 0xb9, 0xb9, 0x58, 0xb6, 0x38, - 0x68, 0xa8, 0xcc, 0x0f, 0x2d, 0x65, 0xa3, 0xe1, 0x5e, 0xf5, 0xd1, 0x2b, 0x30, 0x2e, 0x64, 0x65, - 0x49, 0x71, 0xc8, 0xf4, 0x7b, 0xb8, 0x62, 0x40, 0x71, 0x0a, 0x1b, 0xd5, 0x60, 0x92, 0x96, 0x30, - 0x21, 0x55, 0x52, 0xe0, 0x61, 0xa2, 0xea, 0xa8, 0xbf, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x68, 0x01, - 0x26, 0xb8, 0xb0, 0x42, 0x15, 0x43, 0x36, 0x0f, 0xc2, 0x37, 0x57, 0x6d, 0x84, 0x1b, 0x26, 0x18, - 0xa7, 0xf1, 0xd1, 0x4b, 0x30, 0xea, 0x84, 0xcd, 0x4d, 0x2f, 0x26, 0xcd, 0xb8, 0x13, 0xf2, 0x94, - 0x27, 0x9a, 0xe3, 0xc8, 0x82, 
0x06, 0xc3, 0x06, 0xa6, 0xfd, 0x1e, 0x9c, 0xc8, 0x08, 0x04, 0xa0, - 0x0b, 0xc7, 0x69, 0x7b, 0xf2, 0x9b, 0x52, 0xae, 0x6f, 0x0b, 0xf5, 0x55, 0xf9, 0x35, 0x1a, 0x16, - 0x5d, 0x9d, 0xcc, 0x4e, 0xae, 0x25, 0x6d, 0x55, 0xab, 0x73, 0x45, 0x02, 0x70, 0x82, 0x63, 0xff, - 0x29, 0x80, 0x66, 0xbd, 0x29, 0xe0, 0xee, 0xf4, 0x12, 0x8c, 0xca, 0x3c, 0xc4, 0x5a, 0x32, 0x4f, - 0xf5, 0x99, 0x97, 0x34, 0x18, 0x36, 0x30, 0x69, 0xdf, 0x7c, 0x69, 0x93, 0x4a, 0x3b, 0xda, 0x29, - 0x63, 0x15, 0x4e, 0x70, 0xd0, 0x39, 0x18, 0x8e, 0x48, 0x6b, 0xfd, 0xaa, 0xe7, 0xdf, 0x15, 0x0b, - 0x5b, 0x71, 0xe6, 0x86, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x42, 0xb9, 0xe3, 0xb9, 0x62, 0x29, 0x4b, - 0xb1, 0xa1, 0x7c, 0x73, 0xb5, 0xb6, 0xbf, 0x3b, 0xf7, 0x58, 0xaf, 0xf4, 0xca, 0x54, 0x3f, 0x8f, - 0xe6, 0xe9, 0xf6, 0xa3, 0x95, 0xb3, 0x2e, 0x0c, 0x06, 0xfb, 0xbc, 0x30, 0xb8, 0x08, 0x20, 0xbe, - 0x5a, 0xae, 0xe5, 0x72, 0x32, 0x6b, 0x97, 0x14, 0x04, 0x6b, 0x58, 0x54, 0xcb, 0x6f, 0x86, 0xc4, - 0x91, 0x8a, 0x30, 0x77, 0x50, 0x1f, 0x3e, 0xbc, 0x96, 0xbf, 0x94, 0x26, 0x86, 0xbb, 0xe9, 0xa3, - 0x00, 0xa6, 0x5c, 0x11, 0xc3, 0x9b, 0x34, 0x5a, 0xed, 0xdf, 0x2b, 0x9e, 0xf9, 0xf6, 0xa4, 0x09, - 0xe1, 0x6e, 0xda, 0xe8, 0x4d, 0x98, 0x95, 0x85, 0xdd, 0x01, 0xd4, 0x6c, 0xbb, 0x94, 0x17, 0x4f, - 0xef, 0xed, 0xce, 0xcd, 0xd6, 0x7a, 0x62, 0xe1, 0x03, 0x28, 0xa0, 0x37, 0x60, 0x90, 0x5d, 0x30, - 0x45, 0x33, 0x23, 0xec, 0xc4, 0x7b, 0xbe, 0x48, 0x6c, 0x05, 0x5d, 0xf5, 0xf3, 0xec, 0x9a, 0x4a, - 0x78, 0x0d, 0x27, 0xb7, 0x76, 0xac, 0x10, 0x0b, 0x9a, 0xa8, 0x0d, 0x23, 0x8e, 0xef, 0x07, 0xb1, - 0xc3, 0x05, 0xb1, 0xd1, 0x22, 0xb2, 0xa4, 0xd6, 0xc4, 0x42, 0x52, 0x97, 0xb7, 0xa3, 0x1c, 0x11, - 0x35, 0x08, 0xd6, 0x9b, 0x40, 0xf7, 0x60, 0x22, 0xb8, 0x47, 0x19, 0xa6, 0xbc, 0x11, 0x89, 0x66, - 0xc6, 0xcc, 0x0f, 0xcb, 0x31, 0xd4, 0x1a, 0x95, 0x35, 0x4e, 0x66, 0x12, 0xc5, 0xe9, 0x56, 0xd0, - 0xbc, 0x61, 0xae, 0x1e, 0x4f, 0x7c, 0xe3, 0x13, 0x73, 0xb5, 0x6e, 0x9d, 0x66, 0x41, 0xfa, 0xdc, - 0x1f, 0x96, 0x71, 0x84, 0x89, 0x54, 0x90, 0x7e, 0x02, 0xc2, 0x3a, 
0x1e, 0xda, 0x84, 0xd1, 0xe4, - 0x6e, 0x2b, 0x8c, 0x58, 0xfe, 0x1f, 0xcd, 0xdd, 0xeb, 0xe0, 0x8f, 0x5b, 0xd5, 0x6a, 0xf2, 0x48, - 0x1f, 0xbd, 0x04, 0x1b, 0x94, 0x67, 0x3f, 0x0e, 0x23, 0xda, 0x14, 0xf7, 0xe3, 0xee, 0x3d, 0xfb, - 0x0a, 0x4c, 0xa6, 0xa7, 0xae, 0x2f, 0x77, 0xf1, 0xff, 0x51, 0x82, 0x89, 0x8c, 0x8b, 0x2d, 0x96, - 0x8d, 0x39, 0xc5, 0x64, 0x93, 0xe4, 0xcb, 0x26, 0xab, 0x2c, 0x15, 0x60, 0x95, 0x92, 0x6f, 0x97, - 0x7b, 0xf2, 0x6d, 0xc1, 0x1e, 0x2b, 0xef, 0x87, 0x3d, 0x9a, 0x27, 0xd2, 0x40, 0xa1, 0x13, 0xe9, - 0x01, 0xb0, 0x54, 0xe3, 0x50, 0x1b, 0x2a, 0x70, 0xa8, 0x7d, 0xb5, 0x04, 0x93, 0x89, 0x6b, 0xbc, - 0x48, 0x83, 0x7e, 0xf4, 0x17, 0x1e, 0x6b, 0xc6, 0x85, 0x47, 0x5e, 0x96, 0xf3, 0x54, 0xff, 0x7a, - 0x5e, 0x7e, 0xbc, 0x91, 0xba, 0xfc, 0x78, 0xbe, 0x4f, 0xba, 0x07, 0x5f, 0x84, 0x7c, 0xab, 0x04, - 0x27, 0xd3, 0x55, 0x96, 0x5a, 0x8e, 0xb7, 0x75, 0x0c, 0xe3, 0xf5, 0x69, 0x63, 0xbc, 0x5e, 0xec, - 0xef, 0xbb, 0x58, 0x27, 0x7b, 0x0e, 0x9a, 0x93, 0x1a, 0xb4, 0x8f, 0x1f, 0x86, 0xf8, 0xc1, 0x23, - 0xf7, 0x47, 0x16, 0x3c, 0x9c, 0x59, 0xef, 0x18, 0x4c, 0xbc, 0xaf, 0x99, 0x26, 0xde, 0xe7, 0x0e, + 0xbb, 0x9a, 0x66, 0x86, 0xbb, 0xf9, 0xa3, 0x6d, 0x40, 0xec, 0x4e, 0x3b, 0x74, 0xfc, 0x88, 0x7f, + 0x8b, 0x27, 0x6c, 0xf4, 0xfd, 0xd5, 0x3a, 0x2b, 0x6a, 0x45, 0x57, 0xbb, 0xb8, 0xe1, 0x8c, 0x1a, + 0x34, 0xaf, 0x85, 0x81, 0xa2, 0x5e, 0x0b, 0x83, 0x39, 0xde, 0xfd, 0xbf, 0x62, 0xc1, 0x09, 0x3a, + 0x2c, 0xa6, 0x27, 0x78, 0x84, 0x02, 0x98, 0xa0, 0xd3, 0xae, 0x45, 0x62, 0x09, 0x13, 0xeb, 0x2b, + 0x47, 0xc5, 0x37, 0xf9, 0x24, 0x7a, 0xe4, 0x15, 0x93, 0x19, 0x4e, 0x73, 0xb7, 0xbf, 0x66, 0x01, + 0xf3, 0x9e, 0x3c, 0x86, 0xcd, 0xec, 0x92, 0xb9, 0x99, 0xd9, 0xf9, 0x12, 0xa3, 0xc7, 0x3e, 0xf6, + 0x3c, 0x4c, 0x52, 0x6c, 0x3d, 0x0c, 0xee, 0xef, 0x48, 0x8d, 0x3f, 0x5f, 0xbb, 0xfa, 0x95, 0x12, + 0x5f, 0x36, 0x2a, 0xf2, 0x14, 0x7d, 0xde, 0x82, 0xe1, 0xa6, 0xd3, 0x76, 0x9a, 0x3c, 0x73, 0x57, + 0x01, 0x33, 0x91, 0x51, 0x7e, 0x7e, 0x49, 0x94, 0xe5, 0x26, 0x8e, 0x8f, 0xca, 0x4f, 0x97, 0xe0, + 0x5c, 
0xb3, 0x86, 0xaa, 0x7c, 0xf6, 0x2e, 0x8c, 0x19, 0xcc, 0x8e, 0xf4, 0x3c, 0xfc, 0x79, 0x8b, + 0x0b, 0x7d, 0x75, 0x66, 0xb9, 0x07, 0x53, 0xbe, 0xf6, 0x9f, 0x8a, 0x33, 0xa9, 0x50, 0xcf, 0x17, + 0x17, 0xeb, 0x4c, 0x0a, 0x6a, 0x9e, 0xa2, 0x29, 0x86, 0xb8, 0xbb, 0x0e, 0xfb, 0x37, 0x2c, 0x78, + 0x48, 0x27, 0xd4, 0x42, 0x85, 0xf3, 0x0c, 0xd8, 0x35, 0x18, 0x0e, 0xda, 0x24, 0x74, 0x92, 0xf3, + 0xd9, 0x59, 0xd9, 0xff, 0x37, 0x04, 0x7c, 0x7f, 0x77, 0x6e, 0x5a, 0xe7, 0x2e, 0xe1, 0x58, 0x95, + 0x44, 0x36, 0x0c, 0xb2, 0x7e, 0x89, 0x44, 0x90, 0x37, 0xcb, 0x64, 0xc5, 0x2e, 0xc8, 0x22, 0x2c, + 0x30, 0xf6, 0xdf, 0xb4, 0xf8, 0x74, 0xd3, 0x9b, 0x8e, 0x7e, 0x01, 0x26, 0xb7, 0xe8, 0x51, 0x6e, + 0xf9, 0x7e, 0x3b, 0xe4, 0xe6, 0x77, 0xd9, 0x63, 0x2f, 0x14, 0xef, 0x31, 0xed, 0x73, 0x17, 0x67, + 0x44, 0xeb, 0x27, 0xaf, 0xa5, 0xd8, 0xe2, 0xae, 0x8a, 0xec, 0x7f, 0x50, 0xe2, 0x6b, 0x96, 0xe9, + 0x70, 0x4f, 0xc1, 0x50, 0x3b, 0x70, 0x97, 0x56, 0x6b, 0x58, 0xf4, 0x95, 0x12, 0x3a, 0x75, 0x0e, + 0xc6, 0x12, 0x8f, 0x2e, 0x02, 0x90, 0xfb, 0x31, 0x09, 0x7d, 0xa7, 0xa5, 0xae, 0xf4, 0x95, 0xaa, + 0xb4, 0xac, 0x30, 0x58, 0xa3, 0xa2, 0x65, 0xda, 0x61, 0xb0, 0xed, 0xb9, 0x2c, 0xc6, 0xa5, 0x6c, + 0x96, 0xa9, 0x2b, 0x0c, 0xd6, 0xa8, 0xe8, 0x01, 0xba, 0xe3, 0x47, 0x7c, 0x1b, 0x73, 0xee, 0x88, + 0x2c, 0x4a, 0xc3, 0xc9, 0x01, 0xfa, 0xa6, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x02, 0x83, 0xb1, 0xc3, + 0x2e, 0xaa, 0x07, 0x8a, 0x78, 0xfd, 0xac, 0x51, 0x5a, 0x3d, 0x6d, 0x15, 0x2d, 0x8a, 0x05, 0x0b, + 0xfb, 0x3f, 0x55, 0x01, 0x12, 0xad, 0x0b, 0x7d, 0xae, 0x7b, 0xc1, 0x7f, 0xac, 0xa8, 0xca, 0xf6, + 0xe0, 0x56, 0x3b, 0xfa, 0x92, 0x05, 0x23, 0x4e, 0xab, 0x15, 0x34, 0x9d, 0x98, 0x75, 0x4f, 0xa9, + 0xa8, 0xe8, 0x11, 0x2d, 0x59, 0x48, 0xca, 0xf2, 0xc6, 0x3c, 0x27, 0x2f, 0x8f, 0x35, 0x4c, 0x6e, + 0x7b, 0xf4, 0x26, 0xa0, 0x8f, 0x4a, 0xad, 0x9d, 0x8f, 0xf0, 0x6c, 0x5a, 0x6b, 0xaf, 0x32, 0x81, + 0xab, 0x29, 0xec, 0xe8, 0x2d, 0x23, 0xeb, 0x50, 0xa5, 0x48, 0xa0, 0xb2, 0xa1, 0x87, 0xe4, 0x25, + 0x1c, 0x42, 0xaf, 0xeb, 0xee, 0xf1, 0x03, 
0x45, 0x32, 0x01, 0x68, 0xea, 0x70, 0x8e, 0x6b, 0x7c, + 0x0c, 0x13, 0xae, 0xb9, 0xf3, 0x0a, 0x17, 0xbf, 0x0b, 0xf9, 0x35, 0xa4, 0xb6, 0xec, 0x64, 0xaf, + 0x4d, 0x21, 0x70, 0xba, 0x0a, 0xf4, 0x3a, 0x0f, 0x5e, 0x58, 0xf5, 0xd7, 0x03, 0xe1, 0xe6, 0x77, + 0xae, 0xc0, 0x98, 0xef, 0x44, 0x31, 0xd9, 0xa2, 0x65, 0x92, 0xcd, 0xf5, 0xba, 0xe0, 0x82, 0x15, + 0x3f, 0xb4, 0x06, 0x83, 0x2c, 0x2e, 0x2d, 0x9a, 0x19, 0x2e, 0x62, 0x12, 0x34, 0xc3, 0xb1, 0x93, + 0xf5, 0xc3, 0xfe, 0x46, 0x58, 0xf0, 0x42, 0x97, 0x65, 0x42, 0x86, 0x68, 0xd5, 0xbf, 0x19, 0x11, + 0x96, 0x90, 0xa1, 0xba, 0xf8, 0x91, 0x24, 0xc3, 0x02, 0x87, 0x67, 0xa6, 0x6a, 0x34, 0x4a, 0x52, + 0xc5, 0x46, 0xfc, 0x97, 0x19, 0x20, 0x67, 0xa0, 0x48, 0x43, 0xcd, 0x7c, 0x91, 0x49, 0x67, 0xdf, + 0x32, 0x99, 0xe1, 0x34, 0xf7, 0x63, 0xdd, 0x52, 0x67, 0x7d, 0x98, 0x4c, 0x2f, 0xca, 0x23, 0xdd, + 0xc2, 0x7f, 0x5c, 0x81, 0x71, 0x73, 0x72, 0xa0, 0xf3, 0x50, 0x15, 0x4c, 0x54, 0x7a, 0x37, 0xb5, + 0x06, 0xae, 0x49, 0x04, 0x4e, 0x68, 0x58, 0xa2, 0x3b, 0x56, 0x5c, 0x73, 0xf0, 0x4a, 0x12, 0xdd, + 0x29, 0x0c, 0xd6, 0xa8, 0xa8, 0x26, 0x7c, 0x27, 0x08, 0x62, 0xb5, 0x13, 0xa8, 0x79, 0xb3, 0xc8, + 0xa0, 0x58, 0x60, 0xe9, 0x0e, 0x70, 0x97, 0x0e, 0x66, 0xcb, 0x34, 0x6f, 0xaa, 0x1d, 0xe0, 0x8a, + 0x8e, 0xc4, 0x26, 0x2d, 0xdd, 0xd1, 0x82, 0x88, 0x4d, 0x44, 0xa1, 0x6f, 0x27, 0x0e, 0x73, 0x0d, + 0x1e, 0xab, 0x29, 0xf1, 0xe8, 0xd3, 0xf0, 0x90, 0x0a, 0xad, 0xc4, 0xdc, 0x5c, 0x2c, 0x6b, 0x1c, + 0x34, 0x8e, 0xcc, 0x0f, 0x2d, 0x65, 0x93, 0xe1, 0x5e, 0xe5, 0xd1, 0x2b, 0x30, 0x2e, 0x74, 0x65, + 0xc9, 0x71, 0xc8, 0xf4, 0x7b, 0xb8, 0x62, 0x60, 0x71, 0x8a, 0x1a, 0xd5, 0x60, 0x92, 0x42, 0x98, + 0x92, 0x2a, 0x39, 0xf0, 0x10, 0x51, 0xb5, 0xd5, 0x5f, 0x49, 0xe1, 0x71, 0x57, 0x09, 0xb4, 0x00, + 0x13, 0x5c, 0x59, 0xa1, 0x07, 0x43, 0x36, 0x0e, 0xc2, 0x37, 0x57, 0x2d, 0x84, 0x1b, 0x26, 0x1a, + 0xa7, 0xe9, 0xd1, 0x4b, 0x30, 0xea, 0x84, 0xcd, 0x4d, 0x2f, 0x26, 0xcd, 0xb8, 0x13, 0xf2, 0x74, + 0x27, 0x9a, 0xe3, 0xc8, 0x82, 0x86, 0xc3, 0x06, 0xa5, 0xfd, 0x1e, 0x9c, 0xc8, 
0x08, 0x04, 0xa0, + 0x13, 0xc7, 0x69, 0x7b, 0xf2, 0x9b, 0x52, 0xae, 0x6f, 0x0b, 0xf5, 0x55, 0xf9, 0x35, 0x1a, 0x15, + 0x9d, 0x9d, 0xcc, 0x4e, 0xae, 0x25, 0x6c, 0x55, 0xb3, 0x73, 0x45, 0x22, 0x70, 0x42, 0x63, 0xff, + 0x29, 0x80, 0x66, 0xbd, 0x29, 0xe0, 0xee, 0xf4, 0x12, 0x8c, 0xca, 0x1c, 0xc4, 0x5a, 0x22, 0x4f, + 0xf5, 0x99, 0x97, 0x34, 0x1c, 0x36, 0x28, 0x69, 0xdb, 0x7c, 0x69, 0x93, 0x4a, 0x3b, 0xda, 0x29, + 0x63, 0x15, 0x4e, 0x68, 0xd0, 0x39, 0x18, 0x8e, 0x48, 0x6b, 0xfd, 0xaa, 0xe7, 0xdf, 0x15, 0x13, + 0x5b, 0x49, 0xe6, 0x86, 0x80, 0x63, 0x45, 0x81, 0x16, 0xa1, 0xdc, 0xf1, 0x5c, 0x31, 0x95, 0xa5, + 0xda, 0x50, 0xbe, 0xb9, 0x5a, 0xdb, 0xdf, 0x9d, 0x7b, 0xac, 0x57, 0x6a, 0x65, 0x7a, 0x3e, 0x8f, + 0xe6, 0xe9, 0xf2, 0xa3, 0x85, 0xb3, 0x2e, 0x0c, 0x06, 0xfb, 0xbc, 0x30, 0xb8, 0x08, 0x20, 0xbe, + 0x5a, 0xce, 0xe5, 0x72, 0x32, 0x6a, 0x97, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x94, 0xdf, 0x0c, 0x89, + 0x23, 0x0f, 0xc2, 0xdc, 0x41, 0x7d, 0xf8, 0xf0, 0xa7, 0xfc, 0xa5, 0x34, 0x33, 0xdc, 0xcd, 0x1f, + 0x05, 0x30, 0xe5, 0x8a, 0xf8, 0xdd, 0xa4, 0xd2, 0x6a, 0xff, 0x5e, 0xf1, 0xcc, 0xb7, 0x27, 0xcd, + 0x08, 0x77, 0xf3, 0x46, 0x6f, 0xc2, 0xac, 0x04, 0x76, 0x07, 0x4f, 0xb3, 0xe5, 0x52, 0x5e, 0x3c, + 0xbd, 0xb7, 0x3b, 0x37, 0x5b, 0xeb, 0x49, 0x85, 0x0f, 0xe0, 0x80, 0xde, 0x80, 0x41, 0x76, 0xc1, + 0x14, 0xcd, 0x8c, 0xb0, 0x1d, 0xef, 0xf9, 0x22, 0xb1, 0x15, 0x74, 0xd6, 0xcf, 0xb3, 0x6b, 0x2a, + 0xe1, 0x35, 0x9c, 0xdc, 0xda, 0x31, 0x20, 0x16, 0x3c, 0x51, 0x1b, 0x46, 0x1c, 0xdf, 0x0f, 0x62, + 0x87, 0x2b, 0x62, 0xa3, 0x45, 0x74, 0x49, 0xad, 0x8a, 0x85, 0xa4, 0x2c, 0xaf, 0x47, 0x39, 0x22, + 0x6a, 0x18, 0xac, 0x57, 0x81, 0xee, 0xc1, 0x44, 0x70, 0x8f, 0x0a, 0x4c, 0x79, 0x23, 0x12, 0xcd, + 0x8c, 0x99, 0x1f, 0x96, 0x63, 0xa8, 0x35, 0x0a, 0x6b, 0x92, 0xcc, 0x64, 0x8a, 0xd3, 0xb5, 0xa0, + 0x79, 0xc3, 0x5c, 0x3d, 0x9e, 0xf8, 0xc6, 0x27, 0xe6, 0x6a, 0xdd, 0x3a, 0xcd, 0x02, 0xf4, 0xb9, + 0x3f, 0x2c, 0x93, 0x08, 0x13, 0xa9, 0x00, 0xfd, 0x04, 0x85, 0x75, 0x3a, 0xb4, 0x09, 0xa3, 0xc9, + 0xdd, 0x56, 0x18, 
0xb1, 0xdc, 0x3f, 0x9a, 0xbb, 0xd7, 0xc1, 0x1f, 0xb7, 0xaa, 0x95, 0xe4, 0x91, + 0x3e, 0x3a, 0x04, 0x1b, 0x9c, 0x67, 0x3f, 0x0e, 0x23, 0xda, 0x10, 0xf7, 0xe3, 0xee, 0x3d, 0xfb, + 0x0a, 0x4c, 0xa6, 0x87, 0xae, 0x2f, 0x77, 0xf1, 0xff, 0x51, 0x82, 0x89, 0x8c, 0x8b, 0x2d, 0x96, + 0x89, 0x39, 0x25, 0x64, 0x93, 0xc4, 0xcb, 0xa6, 0xa8, 0x2c, 0x15, 0x10, 0x95, 0x52, 0x6e, 0x97, + 0x7b, 0xca, 0x6d, 0x21, 0x1e, 0x2b, 0xef, 0x47, 0x3c, 0x9a, 0x3b, 0xd2, 0x40, 0xa1, 0x1d, 0xe9, + 0x01, 0x88, 0x54, 0x63, 0x53, 0x1b, 0x2a, 0xb0, 0xa9, 0x7d, 0xb5, 0x04, 0x93, 0x89, 0x6b, 0xbc, + 0x48, 0x81, 0x7e, 0xf4, 0x17, 0x1e, 0x6b, 0xc6, 0x85, 0x47, 0x5e, 0x86, 0xf3, 0x54, 0xfb, 0x7a, + 0x5e, 0x7e, 0xbc, 0x91, 0xba, 0xfc, 0x78, 0xbe, 0x4f, 0xbe, 0x07, 0x5f, 0x84, 0x7c, 0xab, 0x04, + 0x27, 0xd3, 0x45, 0x96, 0x5a, 0x8e, 0xb7, 0x75, 0x0c, 0xfd, 0xf5, 0x69, 0xa3, 0xbf, 0x5e, 0xec, + 0xef, 0xbb, 0x58, 0x23, 0x7b, 0x76, 0x9a, 0x93, 0xea, 0xb4, 0x8f, 0x1f, 0x86, 0xf9, 0xc1, 0x3d, + 0xf7, 0x47, 0x16, 0x3c, 0x9c, 0x59, 0xee, 0x18, 0x4c, 0xbc, 0xaf, 0x99, 0x26, 0xde, 0xe7, 0x0e, 0xf1, 0x75, 0x3d, 0x6c, 0xbe, 0xbf, 0x59, 0xee, 0xf1, 0x55, 0xcc, 0x08, 0x76, 0x03, 0x46, 0x9c, - 0x66, 0x93, 0x44, 0xd1, 0xb5, 0xc0, 0x55, 0x89, 0xc5, 0x9e, 0x65, 0xa7, 0x58, 0x52, 0xbc, 0xbf, - 0x3b, 0x37, 0x9b, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x4c, 0x79, 0x58, 0x3a, 0xa2, 0x94, 0x87, - 0x17, 0x01, 0xb6, 0x95, 0xbe, 0x9c, 0xb6, 0xad, 0x69, 0x9a, 0xb4, 0x86, 0x85, 0xfe, 0x0a, 0x93, - 0x3d, 0xb9, 0x5f, 0x4a, 0xc5, 0x8c, 0xb2, 0xcd, 0x99, 0x3f, 0xdd, 0xc7, 0x85, 0x07, 0xf3, 0x2a, - 0x3b, 0xa4, 0x22, 0x89, 0x3e, 0x05, 0x93, 0x11, 0xcf, 0x49, 0xb1, 0xd4, 0x72, 0x22, 0x16, 0x13, - 0x22, 0xf8, 0x29, 0x8b, 0xcb, 0x6d, 0xa4, 0x60, 0xb8, 0x0b, 0xdb, 0xfe, 0x66, 0x19, 0x3e, 0x7c, - 0xc0, 0xb2, 0x45, 0x0b, 0xe6, 0xfd, 0xf0, 0x33, 0x69, 0x4b, 0xd3, 0x6c, 0x66, 0x65, 0xc3, 0xf4, - 0x94, 0x9a, 0xed, 0xd2, 0xfb, 0x9e, 0xed, 0x2f, 0xeb, 0x76, 0x41, 0xee, 0xaa, 0x7a, 0xe9, 0xd0, - 0x1b, 0xf3, 0x27, 0xf5, 0x5a, 0xe0, 0xb3, 0x16, 0x3c, 0x96, 
0xf9, 0x59, 0x86, 0x3f, 0xca, 0x79, - 0xa8, 0x36, 0x69, 0xa1, 0x16, 0xc1, 0x95, 0x84, 0x4e, 0x4a, 0x00, 0x4e, 0x70, 0x0c, 0xb7, 0x93, - 0x52, 0xae, 0xdb, 0xc9, 0x1f, 0x58, 0x30, 0x9d, 0xee, 0xc4, 0x31, 0xf0, 0xad, 0x86, 0xc9, 0xb7, - 0xe6, 0xfb, 0x9b, 0xfc, 0x1e, 0x2c, 0xeb, 0xab, 0x93, 0x70, 0xaa, 0xeb, 0xd4, 0xe3, 0xa3, 0xf8, - 0x4b, 0x16, 0x4c, 0x6d, 0x30, 0x3d, 0x41, 0x0b, 0x93, 0x13, 0xdf, 0x95, 0x13, 0x5b, 0x78, 0x60, - 0x74, 0x1d, 0xd7, 0x7a, 0xba, 0x50, 0x70, 0x77, 0x63, 0xe8, 0x8b, 0x16, 0x4c, 0x3b, 0xf7, 0xa2, - 0xae, 0x47, 0x7a, 0xc4, 0x42, 0x7a, 0x25, 0xc7, 0x2c, 0x97, 0xf3, 0xbc, 0xcf, 0xe2, 0xcc, 0xde, - 0xee, 0xdc, 0x74, 0x16, 0x16, 0xce, 0x6c, 0x95, 0xce, 0xef, 0xa6, 0x08, 0x97, 0x29, 0x16, 0xf0, - 0x99, 0x15, 0x5c, 0xc3, 0xd9, 0x9a, 0x84, 0x60, 0x45, 0x11, 0xbd, 0x0d, 0xd5, 0x0d, 0x19, 0x19, - 0x97, 0x66, 0x9b, 0x3d, 0x86, 0x39, 0x2b, 0x90, 0x8e, 0x87, 0x2b, 0x28, 0x10, 0x4e, 0x88, 0xa2, - 0xcb, 0x50, 0xf6, 0xd7, 0x23, 0x11, 0x83, 0x9e, 0xe7, 0x6d, 0x64, 0xfa, 0x78, 0xf1, 0xb0, 0xdd, - 0xeb, 0x2b, 0x0d, 0x4c, 0x49, 0x50, 0x4a, 0xe1, 0x1d, 0x57, 0xd8, 0xa3, 0x73, 0x28, 0xe1, 0xc5, - 0x5a, 0x37, 0x25, 0xbc, 0x58, 0xc3, 0x94, 0x04, 0xaa, 0xc3, 0x00, 0x0b, 0xc6, 0x11, 0xc6, 0xe6, - 0x9c, 0x44, 0x05, 0x5d, 0x21, 0x47, 0x3c, 0x33, 0x27, 0x2b, 0xc6, 0x9c, 0x10, 0x5a, 0x83, 0xc1, - 0x26, 0x7b, 0x5c, 0x42, 0x58, 0x01, 0xf2, 0x52, 0x78, 0x74, 0x3d, 0x44, 0xc1, 0x6f, 0xd8, 0x78, - 0x39, 0x16, 0xb4, 0x18, 0x55, 0xd2, 0xde, 0x5c, 0x8f, 0x84, 0x9a, 0x9f, 0x47, 0xb5, 0xeb, 0x99, - 0x10, 0x41, 0x95, 0x95, 0x63, 0x41, 0x0b, 0xd5, 0xa0, 0xb4, 0xde, 0x14, 0xb1, 0x3a, 0x39, 0x46, + 0x66, 0x93, 0x44, 0xd1, 0xb5, 0xc0, 0x55, 0x49, 0xc5, 0x9e, 0x65, 0xbb, 0x58, 0x02, 0xde, 0xdf, + 0x9d, 0x9b, 0x4d, 0xb3, 0x48, 0xd0, 0x58, 0xe7, 0x60, 0xa6, 0x3b, 0x2c, 0x1d, 0x51, 0xba, 0xc3, + 0x8b, 0x00, 0xdb, 0xea, 0xbc, 0x9c, 0xb6, 0xad, 0x69, 0x27, 0x69, 0x8d, 0x0a, 0xfd, 0x15, 0xa6, + 0x7b, 0x72, 0xbf, 0x94, 0x8a, 0x19, 0x65, 0x9b, 0x33, 0x7e, 0xba, 0x8f, 0x0b, 0x0f, 0xe6, 0x55, + 
0x76, 0x48, 0xc5, 0x12, 0x7d, 0x0a, 0x26, 0x23, 0x9e, 0x8f, 0x62, 0xa9, 0xe5, 0x44, 0x2c, 0x26, + 0x44, 0xc8, 0x53, 0x16, 0x97, 0xdb, 0x48, 0xe1, 0x70, 0x17, 0xb5, 0xfd, 0xcd, 0x32, 0x7c, 0xf8, + 0x80, 0x69, 0x8b, 0x16, 0xcc, 0xfb, 0xe1, 0x67, 0xd2, 0x96, 0xa6, 0xd9, 0xcc, 0xc2, 0x86, 0xe9, + 0x29, 0x35, 0xda, 0xa5, 0xf7, 0x3d, 0xda, 0x5f, 0xd6, 0xed, 0x82, 0xdc, 0x55, 0xf5, 0xd2, 0xa1, + 0x17, 0xe6, 0x4f, 0xea, 0xb5, 0xc0, 0x67, 0x2d, 0x78, 0x2c, 0xf3, 0xb3, 0x0c, 0x7f, 0x94, 0xf3, + 0x50, 0x6d, 0x52, 0xa0, 0x16, 0xc1, 0x95, 0x84, 0x4e, 0x4a, 0x04, 0x4e, 0x68, 0x0c, 0xb7, 0x93, + 0x52, 0xae, 0xdb, 0xc9, 0x1f, 0x58, 0x30, 0x9d, 0x6e, 0xc4, 0x31, 0xc8, 0xad, 0x86, 0x29, 0xb7, + 0xe6, 0xfb, 0x1b, 0xfc, 0x1e, 0x22, 0xeb, 0xab, 0x93, 0x70, 0xaa, 0x6b, 0xd7, 0xe3, 0xbd, 0xf8, + 0x4b, 0x16, 0x4c, 0x6d, 0xb0, 0x73, 0x82, 0x16, 0x26, 0x27, 0xbe, 0x2b, 0x27, 0xb6, 0xf0, 0xc0, + 0xe8, 0x3a, 0x7e, 0xea, 0xe9, 0x22, 0xc1, 0xdd, 0x95, 0xa1, 0x2f, 0x5a, 0x30, 0xed, 0xdc, 0x8b, + 0xba, 0x1e, 0xe8, 0x11, 0x13, 0xe9, 0x95, 0x1c, 0xb3, 0x5c, 0xce, 0xd3, 0x3e, 0x8b, 0x33, 0x7b, + 0xbb, 0x73, 0xd3, 0x59, 0x54, 0x38, 0xb3, 0x56, 0x3a, 0xbe, 0x9b, 0x22, 0x5c, 0xa6, 0x58, 0xc0, + 0x67, 0x56, 0x70, 0x0d, 0x17, 0x6b, 0x12, 0x83, 0x15, 0x47, 0xf4, 0x36, 0x54, 0x37, 0x64, 0x64, + 0x5c, 0x5a, 0x6c, 0xf6, 0xe8, 0xe6, 0xac, 0x40, 0x3a, 0x1e, 0xae, 0xa0, 0x50, 0x38, 0x61, 0x8a, + 0x2e, 0x43, 0xd9, 0x5f, 0x8f, 0x44, 0x0c, 0x7a, 0x9e, 0xb7, 0x91, 0xe9, 0xe3, 0xc5, 0xc3, 0x76, + 0xaf, 0xaf, 0x34, 0x30, 0x65, 0x41, 0x39, 0x85, 0x77, 0x5c, 0x61, 0x8f, 0xce, 0xe1, 0x84, 0x17, + 0x6b, 0xdd, 0x9c, 0xf0, 0x62, 0x0d, 0x53, 0x16, 0xa8, 0x0e, 0x03, 0x2c, 0x18, 0x47, 0x18, 0x9b, + 0x73, 0x12, 0x15, 0x74, 0x85, 0x1c, 0xf1, 0xac, 0x9c, 0x0c, 0x8c, 0x39, 0x23, 0xb4, 0x06, 0x83, + 0x4d, 0xf6, 0xb0, 0x84, 0xb0, 0x02, 0xe4, 0xa5, 0xf0, 0xe8, 0x7a, 0x84, 0x82, 0xdf, 0xb0, 0x71, + 0x38, 0x16, 0xbc, 0x18, 0x57, 0xd2, 0xde, 0x5c, 0x8f, 0xc4, 0x31, 0x3f, 0x8f, 0x6b, 0xd7, 0x13, + 0x21, 0x82, 0x2b, 0x83, 0x63, 0xc1, 
0x0b, 0xd5, 0xa0, 0xb4, 0xde, 0x14, 0xb1, 0x3a, 0x39, 0x46, 0x66, 0x33, 0x06, 0x7b, 0x71, 0x70, 0x6f, 0x77, 0xae, 0xb4, 0xb2, 0x84, 0x4b, 0xeb, 0x4d, 0xf4, - 0x1a, 0x0c, 0xad, 0xf3, 0xa8, 0x5a, 0x91, 0xcc, 0xf7, 0x42, 0x5e, 0xe8, 0x6f, 0x57, 0x08, 0x2e, - 0x0f, 0x49, 0x11, 0x00, 0x2c, 0xc9, 0xb1, 0x3c, 0x87, 0x2a, 0x4e, 0x58, 0x64, 0xf3, 0x9d, 0xef, - 0x2f, 0xae, 0x58, 0x68, 0xbf, 0xaa, 0x14, 0x6b, 0x14, 0xe9, 0x9a, 0x77, 0xe4, 0x3b, 0x39, 0x2c, - 0x93, 0x6f, 0xee, 0x9a, 0xcf, 0x7c, 0x56, 0x87, 0xaf, 0x79, 0x05, 0xc2, 0x09, 0x51, 0xd4, 0x81, - 0xb1, 0xed, 0xa8, 0xbd, 0x49, 0xe4, 0xd6, 0x67, 0xe9, 0x7d, 0x47, 0x2e, 0x7e, 0x32, 0x27, 0x67, - 0xb3, 0xa8, 0xe2, 0x85, 0x71, 0xc7, 0x69, 0x75, 0x71, 0x30, 0x96, 0x58, 0xee, 0x96, 0x4e, 0x16, - 0x9b, 0xad, 0xd0, 0x29, 0x79, 0xb7, 0x13, 0xdc, 0xd9, 0x89, 0x89, 0x48, 0xff, 0x9b, 0x33, 0x25, - 0xaf, 0x72, 0xe4, 0xee, 0x29, 0x11, 0x00, 0x2c, 0xc9, 0xa9, 0x21, 0x63, 0xdc, 0x78, 0xb2, 0xf0, - 0x90, 0x75, 0x7d, 0x43, 0x32, 0x64, 0x8c, 0xfb, 0x26, 0x44, 0x19, 0xd7, 0x6d, 0x6f, 0x06, 0x71, - 0xe0, 0xa7, 0x78, 0xff, 0x54, 0x11, 0xae, 0x5b, 0xcf, 0xa8, 0xd9, 0xcd, 0x75, 0xb3, 0xb0, 0x70, - 0x66, 0xab, 0xc8, 0x87, 0xf1, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xae, 0x43, 0x54, 0x48, 0x47, - 0x34, 0xea, 0x88, 0xb6, 0x99, 0xe7, 0xb1, 0x09, 0xc1, 0x29, 0xea, 0x74, 0xea, 0xa2, 0xa6, 0xd3, - 0x22, 0xab, 0x37, 0x66, 0x4e, 0x14, 0x99, 0xba, 0x06, 0x47, 0xee, 0x9e, 0x3a, 0x01, 0xc0, 0x92, - 0x1c, 0xe5, 0x75, 0x2c, 0x97, 0x3d, 0xcb, 0x66, 0x9c, 0xcb, 0xeb, 0xba, 0xbc, 0x73, 0x39, 0xaf, - 0x63, 0xc5, 0x98, 0x13, 0x42, 0xef, 0x40, 0x55, 0x08, 0xb7, 0x41, 0x34, 0x73, 0x92, 0x51, 0xfd, - 0xd9, 0x9c, 0xde, 0x72, 0xf4, 0x1b, 0x8d, 0xec, 0x53, 0x5f, 0x44, 0xff, 0x49, 0x24, 0x9c, 0x90, - 0xb7, 0x7f, 0x63, 0xb0, 0x5b, 0xec, 0x61, 0x8a, 0xcd, 0xdf, 0xe8, 0xbe, 0xb1, 0xfe, 0x54, 0xff, - 0xfa, 0xfb, 0x03, 0xbc, 0xbb, 0xfe, 0xa2, 0x05, 0xa7, 0xda, 0x99, 0x9f, 0x27, 0x04, 0x87, 0x7e, - 0xcd, 0x00, 0x7c, 0x68, 0x54, 0x8e, 0xf1, 0x6c, 0x38, 0xee, 0xd1, 0x66, 0x5a, 
0x15, 0x28, 0xbf, - 0x6f, 0x55, 0xe0, 0x36, 0x0c, 0x33, 0xd9, 0x35, 0xc9, 0xef, 0xd3, 0x67, 0x2a, 0x1c, 0x26, 0x82, - 0x2c, 0x09, 0x12, 0x58, 0x11, 0xa3, 0x03, 0xf7, 0x68, 0xfa, 0x23, 0x30, 0x61, 0x60, 0x91, 0xd9, - 0x92, 0xeb, 0x59, 0x2b, 0x62, 0x24, 0x1e, 0xad, 0x1f, 0x84, 0xbc, 0x9f, 0x87, 0x80, 0x0f, 0x6e, - 0x0c, 0xd5, 0x32, 0x14, 0xbd, 0x41, 0xf3, 0x7a, 0x2a, 0x5f, 0xd9, 0x3b, 0x5e, 0x05, 0xe5, 0x1f, - 0x5a, 0x19, 0xf2, 0x34, 0x57, 0x2a, 0x3f, 0x69, 0x2a, 0x95, 0x4f, 0xa6, 0x95, 0xca, 0x2e, 0x53, - 0x92, 0xa1, 0x4f, 0x16, 0xcf, 0xcc, 0x5b, 0x34, 0x81, 0x91, 0xdd, 0x82, 0x33, 0x79, 0xcc, 0x9a, - 0xb9, 0xac, 0xb9, 0xea, 0xb2, 0x36, 0x71, 0x59, 0x73, 0x57, 0x6b, 0x98, 0x41, 0x8a, 0xe6, 0xc0, - 0xb0, 0x7f, 0xb9, 0x04, 0xe5, 0x7a, 0xe0, 0x1e, 0x83, 0x69, 0xec, 0x92, 0x61, 0x1a, 0x7b, 0x22, - 0xf7, 0xa1, 0xc8, 0x9e, 0x86, 0xb0, 0x1b, 0x29, 0x43, 0xd8, 0xcf, 0xe4, 0x93, 0x3a, 0xd8, 0xec, - 0xf5, 0xed, 0x32, 0xe8, 0x4f, 0x5d, 0xa2, 0xff, 0x70, 0x18, 0x4f, 0xe6, 0x72, 0xb1, 0xd7, 0x2f, - 0x45, 0x1b, 0xcc, 0xe3, 0x4d, 0x06, 0x62, 0xfe, 0xc4, 0x3a, 0x34, 0xdf, 0x26, 0xde, 0xc6, 0x66, - 0x4c, 0xdc, 0xf4, 0x87, 0x1d, 0x9f, 0x43, 0xf3, 0x5f, 0x58, 0x30, 0x91, 0x6a, 0x1d, 0xb5, 0xb2, - 0x22, 0xb8, 0x0e, 0x69, 0xec, 0x9a, 0xca, 0x0d, 0xf9, 0x9a, 0x07, 0x50, 0x77, 0x16, 0xd2, 0xa0, - 0xc4, 0x64, 0x6b, 0x75, 0xa9, 0x11, 0x61, 0x0d, 0x03, 0xbd, 0x00, 0x23, 0x71, 0xd0, 0x0e, 0x5a, - 0xc1, 0xc6, 0xce, 0x15, 0x22, 0xb3, 0xb3, 0xa8, 0x9b, 0xa5, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, - 0x3b, 0x65, 0x48, 0x3f, 0x94, 0xfa, 0xff, 0xd7, 0xe9, 0x4f, 0xce, 0x3a, 0xfd, 0x63, 0x0b, 0x26, - 0x69, 0xeb, 0xcc, 0xc5, 0x48, 0x3a, 0x1e, 0xab, 0x67, 0x42, 0xac, 0x03, 0x9e, 0x09, 0x79, 0x92, - 0x72, 0x3b, 0x37, 0xe8, 0xc4, 0xc2, 0x04, 0xa6, 0x31, 0x31, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, - 0x30, 0x14, 0x11, 0x5a, 0x3a, 0x1e, 0x09, 0x43, 0x2c, 0xa0, 0xf2, 0x15, 0x91, 0x4a, 0x8f, 0x57, - 0x44, 0x58, 0x7e, 0x33, 0xe1, 0xd6, 0x22, 0xc4, 0x0a, 0x2d, 0xbf, 0x99, 0xf4, 0x77, 0x49, 0x70, - 0xec, 0xaf, 0x97, 
0x61, 0xb4, 0x1e, 0xb8, 0x49, 0x44, 0xc1, 0xf3, 0x46, 0x44, 0xc1, 0x99, 0x54, - 0x44, 0xc1, 0xa4, 0x8e, 0xfb, 0x60, 0x02, 0x0a, 0x44, 0x1e, 0x3c, 0xf6, 0xce, 0xcd, 0x21, 0x83, - 0x09, 0x8c, 0x3c, 0x78, 0x8a, 0x10, 0x36, 0xe9, 0xfe, 0x34, 0x05, 0x11, 0xfc, 0x6f, 0x0b, 0xc6, - 0xeb, 0x81, 0x4b, 0x17, 0xe8, 0x4f, 0xd3, 0x6a, 0xd4, 0xb3, 0xe7, 0x0d, 0x1e, 0x90, 0x3d, 0xef, - 0x9f, 0x5b, 0x30, 0x54, 0x0f, 0xdc, 0x63, 0x30, 0x0f, 0xaf, 0x98, 0xe6, 0xe1, 0xc7, 0x72, 0x39, - 0x6f, 0x0f, 0x8b, 0xf0, 0x37, 0xcb, 0x30, 0x46, 0x7b, 0x1c, 0x6c, 0xc8, 0xf9, 0x32, 0xc6, 0xc6, - 0x2a, 0x30, 0x36, 0x54, 0x24, 0x0c, 0x5a, 0xad, 0xe0, 0x5e, 0x7a, 0xee, 0x56, 0x58, 0x29, 0x16, - 0x50, 0x74, 0x0e, 0x86, 0xdb, 0x21, 0xd9, 0xf6, 0x82, 0x4e, 0x94, 0x8e, 0xf6, 0xac, 0x8b, 0x72, - 0xac, 0x30, 0xd0, 0xf3, 0x30, 0x1a, 0x79, 0x7e, 0x93, 0x48, 0xa7, 0x97, 0x0a, 0x73, 0x7a, 0xe1, - 0x89, 0x4a, 0xb5, 0x72, 0x6c, 0x60, 0xa1, 0xdb, 0x50, 0x65, 0xff, 0xd9, 0x0e, 0xea, 0xff, 0x19, - 0x10, 0xae, 0x0e, 0x4b, 0x02, 0x38, 0xa1, 0x85, 0x2e, 0x02, 0xc4, 0xd2, 0x3d, 0x27, 0x12, 0x61, - 0xc9, 0x4a, 0x2e, 0x55, 0x8e, 0x3b, 0x11, 0xd6, 0xb0, 0xd0, 0x33, 0x50, 0x8d, 0x1d, 0xaf, 0x75, - 0xd5, 0xf3, 0x49, 0x24, 0xdc, 0x9b, 0x44, 0xd2, 0x71, 0x51, 0x88, 0x13, 0x38, 0x3d, 0xef, 0x59, - 0xd0, 0x3b, 0x7f, 0x62, 0x68, 0x98, 0x61, 0xb3, 0xf3, 0xfe, 0xaa, 0x2a, 0xc5, 0x1a, 0x86, 0xfd, - 0x12, 0x9c, 0xac, 0x07, 0x6e, 0x3d, 0x08, 0xe3, 0x95, 0x20, 0xbc, 0xe7, 0x84, 0xae, 0x9c, 0xbf, - 0x39, 0x99, 0xeb, 0x9a, 0x9e, 0xc9, 0x03, 0xdc, 0x8a, 0x60, 0xe4, 0xae, 0x7e, 0x8e, 0x9d, 0xf8, - 0x7d, 0x86, 0xaa, 0xfc, 0xa0, 0x04, 0xa8, 0xce, 0x1c, 0x88, 0x8c, 0x17, 0xa9, 0x36, 0x61, 0x3c, - 0x22, 0x57, 0x3d, 0xbf, 0x73, 0x5f, 0x90, 0x2a, 0x16, 0x1b, 0xd4, 0x58, 0xd6, 0xeb, 0x70, 0x3b, - 0x8d, 0x59, 0x86, 0x53, 0x74, 0xe9, 0x60, 0x86, 0x1d, 0x7f, 0x21, 0xba, 0x19, 0x91, 0x50, 0xbc, - 0xc0, 0xc4, 0x06, 0x13, 0xcb, 0x42, 0x9c, 0xc0, 0xe9, 0xe2, 0x61, 0x7f, 0xae, 0x07, 0x3e, 0x0e, - 0x82, 0x58, 0x2e, 0x37, 0xf6, 0x22, 0x87, 0x56, 0x8e, 
0x0d, 0x2c, 0xb4, 0x02, 0x28, 0xea, 0xb4, - 0xdb, 0x2d, 0x76, 0x53, 0xea, 0xb4, 0x2e, 0x85, 0x41, 0xa7, 0xcd, 0xfd, 0xc8, 0xc5, 0x63, 0x16, - 0x8d, 0x2e, 0x28, 0xce, 0xa8, 0x41, 0x99, 0xc5, 0x7a, 0xc4, 0x7e, 0x8b, 0x08, 0x78, 0x6e, 0x6d, - 0x6d, 0xb0, 0x22, 0x2c, 0x61, 0xf6, 0x2f, 0xb2, 0x03, 0x8e, 0x3d, 0x8d, 0x13, 0x77, 0x42, 0x82, - 0xb6, 0x60, 0xac, 0xcd, 0x0e, 0xb1, 0x38, 0x0c, 0x5a, 0x2d, 0x22, 0xe5, 0xcb, 0xc3, 0xb9, 0x30, - 0xf1, 0xc7, 0x30, 0x74, 0x72, 0xd8, 0xa4, 0x6e, 0xff, 0xb7, 0x71, 0xc6, 0xab, 0xc4, 0x65, 0xf5, - 0x90, 0x70, 0x56, 0x16, 0x92, 0xdc, 0x47, 0x8a, 0x3c, 0x72, 0x97, 0x9c, 0x03, 0xc2, 0xf5, 0x19, - 0x4b, 0x2a, 0xe8, 0x33, 0xcc, 0x15, 0x9f, 0x33, 0x88, 0xe2, 0x4f, 0x77, 0x72, 0x7c, 0xc3, 0x0d, - 0x5f, 0x90, 0xc0, 0x1a, 0x39, 0x74, 0x15, 0xc6, 0xc4, 0x4b, 0x2a, 0xc2, 0x4c, 0x51, 0x36, 0x54, - 0xec, 0x31, 0xac, 0x03, 0xf7, 0xd3, 0x05, 0xd8, 0xac, 0x8c, 0x36, 0xe0, 0x51, 0xed, 0xa5, 0xb0, - 0x0c, 0x77, 0x3b, 0xce, 0x79, 0x1e, 0xdb, 0xdb, 0x9d, 0x7b, 0x74, 0xed, 0x20, 0x44, 0x7c, 0x30, - 0x1d, 0x74, 0x03, 0x4e, 0x3a, 0xcd, 0xd8, 0xdb, 0x26, 0x35, 0xe2, 0xb8, 0x2d, 0xcf, 0x27, 0x66, - 0x9a, 0x84, 0x87, 0xf7, 0x76, 0xe7, 0x4e, 0x2e, 0x64, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0x93, 0x50, - 0x75, 0xfd, 0x48, 0x8c, 0xc1, 0xa0, 0xf1, 0x30, 0x5e, 0xb5, 0x76, 0xbd, 0xa1, 0xbe, 0x3f, 0xf9, - 0x83, 0x93, 0x0a, 0xe8, 0x5d, 0x18, 0xd5, 0xc3, 0x9f, 0xc4, 0x83, 0x8c, 0x2f, 0x16, 0xd2, 0x9f, - 0x8d, 0x98, 0x21, 0x6e, 0xc1, 0x53, 0x6e, 0xad, 0x46, 0x38, 0x91, 0xd1, 0x04, 0xfa, 0x79, 0x40, - 0x11, 0x09, 0xb7, 0xbd, 0x26, 0x59, 0x68, 0xb2, 0xec, 0xbe, 0xcc, 0xc6, 0x33, 0x6c, 0xc4, 0x77, - 0xa0, 0x46, 0x17, 0x06, 0xce, 0xa8, 0x85, 0x2e, 0x53, 0xce, 0xa3, 0x97, 0x0a, 0x2f, 0x64, 0x29, - 0x18, 0xce, 0xd4, 0x48, 0x3b, 0x24, 0x4d, 0x27, 0x26, 0xae, 0x49, 0x11, 0xa7, 0xea, 0xd1, 0x73, - 0x49, 0x3d, 0xe0, 0x00, 0xa6, 0xef, 0x6c, 0xf7, 0x23, 0x0e, 0x54, 0xcf, 0xda, 0x0c, 0xa2, 0xf8, - 0x3a, 0x89, 0xef, 0x05, 0xe1, 0x5d, 0x91, 0x11, 0x2d, 0x49, 0x95, 0x98, 0x80, 0xb0, 0x8e, 
0x47, - 0x65, 0x28, 0x76, 0xf5, 0xb7, 0x5a, 0x63, 0xf7, 0x2a, 0xc3, 0xc9, 0xde, 0xb9, 0xcc, 0x8b, 0xb1, - 0x84, 0x4b, 0xd4, 0xd5, 0xfa, 0x12, 0xbb, 0x23, 0x49, 0xa1, 0xae, 0xd6, 0x97, 0xb0, 0x84, 0xa3, - 0xa0, 0xfb, 0xf9, 0xc1, 0xf1, 0x22, 0xf7, 0x55, 0xdd, 0x9c, 0xbc, 0xe0, 0x0b, 0x84, 0xf7, 0x61, - 0x52, 0x3d, 0x81, 0xc8, 0x93, 0xc6, 0x45, 0x33, 0x13, 0x6c, 0xe1, 0x1c, 0x26, 0xf7, 0x9c, 0xb2, - 0xeb, 0xad, 0xa6, 0x68, 0xe2, 0xae, 0x56, 0x8c, 0xe4, 0x1c, 0x93, 0xb9, 0x8f, 0x72, 0x9c, 0x87, - 0x6a, 0xd4, 0xb9, 0xe3, 0x06, 0x5b, 0x8e, 0xe7, 0xb3, 0x8b, 0x0c, 0x4d, 0x88, 0x69, 0x48, 0x00, - 0x4e, 0x70, 0x50, 0x1d, 0x86, 0x1d, 0xa1, 0xc2, 0x89, 0x0b, 0x87, 0x9c, 0x28, 0x7c, 0xa9, 0xf0, - 0x71, 0xeb, 0xaa, 0xfc, 0x87, 0x15, 0x15, 0xf4, 0x32, 0x8c, 0x89, 0x20, 0x32, 0xe1, 0xec, 0x79, - 0xc2, 0x0c, 0x38, 0x68, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x0d, 0x18, 0xa7, 0x54, 0x12, 0x06, 0x38, - 0x33, 0xdd, 0x1f, 0x0f, 0xd5, 0xd2, 0x9f, 0xeb, 0x64, 0x70, 0x8a, 0x2c, 0x72, 0xe1, 0x11, 0xa7, - 0x13, 0x07, 0x5b, 0x74, 0x27, 0x98, 0xfb, 0x64, 0x2d, 0xb8, 0x4b, 0x7c, 0x76, 0xcb, 0x30, 0xbc, - 0x78, 0x66, 0x6f, 0x77, 0xee, 0x91, 0x85, 0x03, 0xf0, 0xf0, 0x81, 0x54, 0xd0, 0x5b, 0x30, 0x12, - 0x07, 0x2d, 0xe1, 0xc3, 0x1d, 0xcd, 0x9c, 0x2a, 0x92, 0x84, 0x68, 0x4d, 0x55, 0xd0, 0xcd, 0x18, - 0x8a, 0x08, 0xd6, 0x29, 0xa2, 0x37, 0xf9, 0xae, 0x64, 0x09, 0x33, 0x49, 0x34, 0xf3, 0x50, 0x91, - 0xc1, 0x52, 0x19, 0x36, 0xcd, 0xed, 0x2b, 0x68, 0x60, 0x9d, 0xe0, 0xec, 0xcf, 0xc1, 0x54, 0x17, - 0xcb, 0xeb, 0xcb, 0xb9, 0xf5, 0x3f, 0x0e, 0x40, 0x55, 0x59, 0x0c, 0xd1, 0x79, 0xd3, 0x38, 0xfc, - 0x70, 0xda, 0x38, 0x3c, 0x4c, 0x05, 0x34, 0xdd, 0x1e, 0xfc, 0x66, 0xc6, 0xa3, 0xfa, 0x4f, 0xe7, - 0xee, 0xf1, 0xe2, 0x91, 0x6d, 0x9a, 0x8a, 0x57, 0x2e, 0x6c, 0x6f, 0xae, 0x1c, 0xa8, 0x35, 0x16, - 0x7c, 0x28, 0x92, 0xea, 0x87, 0xed, 0xc0, 0x5d, 0xad, 0xa7, 0xdf, 0x41, 0xab, 0xd3, 0x42, 0xcc, - 0x61, 0x4c, 0xae, 0xa7, 0x67, 0x36, 0x93, 0xeb, 0x87, 0x0e, 0x29, 0xd7, 0x4b, 0x02, 0x38, 0xa1, - 0x85, 0xb6, 0x61, 0xaa, 0x69, 
0x3e, 0x6b, 0xa7, 0xe2, 0xd5, 0x9e, 0xed, 0xe3, 0x59, 0xb9, 0x8e, - 0xf6, 0x22, 0xcd, 0x52, 0x9a, 0x1e, 0xee, 0x6e, 0x02, 0xbd, 0x0c, 0xc3, 0xef, 0x06, 0x11, 0xbb, - 0xb6, 0x10, 0x07, 0x97, 0x8c, 0x0b, 0x1a, 0x7e, 0xf5, 0x46, 0x83, 0x95, 0xef, 0xef, 0xce, 0x8d, - 0xd4, 0x03, 0x57, 0xfe, 0xc5, 0xaa, 0x02, 0xfa, 0xac, 0x05, 0x27, 0x8d, 0x7d, 0xac, 0x7a, 0x0e, - 0x87, 0xe9, 0xf9, 0xa3, 0xa2, 0xe5, 0x93, 0xab, 0x59, 0x34, 0x71, 0x76, 0x53, 0xf6, 0x77, 0xb9, - 0x89, 0x54, 0x18, 0x4d, 0x48, 0xd4, 0x69, 0x1d, 0xc7, 0xeb, 0x10, 0x37, 0x0c, 0x7b, 0xce, 0x03, - 0x30, 0xd2, 0xff, 0x7b, 0x8b, 0x19, 0xe9, 0xd7, 0xc8, 0x56, 0xbb, 0xe5, 0xc4, 0xc7, 0xe1, 0xfb, - 0xfc, 0x19, 0x18, 0x8e, 0x45, 0x6b, 0xc5, 0x9e, 0xb6, 0xd0, 0xba, 0xc7, 0x2e, 0x2f, 0xd4, 0xc1, - 0x27, 0x4b, 0xb1, 0x22, 0x68, 0xff, 0x2b, 0x3e, 0x2b, 0x12, 0x72, 0x0c, 0x96, 0x88, 0xeb, 0xa6, - 0x25, 0xe2, 0xa9, 0xc2, 0xdf, 0xd2, 0xc3, 0x22, 0xf1, 0x1d, 0xf3, 0x0b, 0x98, 0x7e, 0xf2, 0x93, - 0x73, 0x8b, 0x64, 0xff, 0xba, 0x05, 0xd3, 0x59, 0xce, 0x08, 0x54, 0x80, 0xe1, 0xda, 0x91, 0xba, - 0x5f, 0x53, 0xa3, 0x7a, 0x4b, 0x94, 0x63, 0x85, 0x51, 0x38, 0xd7, 0x7c, 0x7f, 0x29, 0xb4, 0x6e, - 0x80, 0xf9, 0x40, 0x22, 0x7a, 0x85, 0x87, 0x3a, 0x58, 0xea, 0x05, 0xc3, 0xfe, 0xc2, 0x1c, 0xec, - 0x6f, 0x94, 0x60, 0x9a, 0x1b, 0xb9, 0x17, 0xb6, 0x03, 0xcf, 0xad, 0x07, 0xae, 0x08, 0xfc, 0x70, - 0x61, 0xb4, 0xad, 0x29, 0xb7, 0xc5, 0x52, 0xf2, 0xe8, 0xea, 0x70, 0xa2, 0x50, 0xe8, 0xa5, 0xd8, - 0xa0, 0x4a, 0x5b, 0x21, 0xdb, 0x5e, 0x53, 0xd9, 0x4c, 0x4b, 0x7d, 0x9f, 0x0c, 0xaa, 0x95, 0x65, - 0x8d, 0x0e, 0x36, 0xa8, 0x1e, 0xc1, 0x13, 0x31, 0xf6, 0xdf, 0xb7, 0xe0, 0xa1, 0x1e, 0x69, 0x7b, - 0x68, 0x73, 0xf7, 0xd8, 0xc5, 0x82, 0x78, 0x81, 0x53, 0x35, 0xc7, 0xaf, 0x1b, 0xb0, 0x80, 0xa2, - 0x3b, 0x00, 0xfc, 0xba, 0x80, 0xca, 0xd2, 0xe9, 0xbb, 0xec, 0x82, 0xc9, 0x31, 0xb4, 0xbc, 0x09, - 0x92, 0x12, 0xd6, 0xa8, 0xda, 0x5f, 0x2b, 0xc3, 0x00, 0x7f, 0xe8, 0xbd, 0x0e, 0x43, 0x9b, 0x3c, - 0x9f, 0x71, 0x7f, 0xe9, 0x94, 0x13, 0xe5, 0x85, 0x17, 0x60, 0x49, 
0x06, 0x5d, 0x83, 0x13, 0x22, - 0xf4, 0xa8, 0x46, 0x5a, 0xce, 0x8e, 0xd4, 0x86, 0xf9, 0xbb, 0x21, 0x32, 0xc1, 0xfd, 0x89, 0xd5, - 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd2, 0x95, 0x7e, 0x90, 0xe7, 0x89, 0x56, 0x92, 0x70, 0x4e, - 0x0a, 0xc2, 0x97, 0x61, 0xac, 0xdd, 0xa5, 0xf7, 0x6b, 0xef, 0x69, 0x9b, 0xba, 0xbe, 0x89, 0xcb, - 0x7c, 0x17, 0x3a, 0xcc, 0x67, 0x63, 0x6d, 0x33, 0x24, 0xd1, 0x66, 0xd0, 0x72, 0xc5, 0x53, 0xb0, - 0x89, 0xef, 0x42, 0x0a, 0x8e, 0xbb, 0x6a, 0x50, 0x2a, 0xeb, 0x8e, 0xd7, 0xea, 0x84, 0x24, 0xa1, - 0x32, 0x68, 0x52, 0x59, 0x49, 0xc1, 0x71, 0x57, 0x0d, 0xba, 0xb6, 0x4e, 0x8a, 0xd7, 0x43, 0x65, - 0x90, 0xba, 0x60, 0x41, 0x9f, 0x86, 0x21, 0x19, 0x40, 0x50, 0x28, 0x97, 0x8a, 0x70, 0x4c, 0x50, - 0x2f, 0x91, 0x6a, 0xef, 0xc8, 0x89, 0xd0, 0x01, 0x49, 0xef, 0x30, 0xaf, 0x54, 0xfe, 0xb9, 0x05, - 0x27, 0x32, 0x1c, 0xe1, 0x38, 0x4b, 0xdb, 0xf0, 0xa2, 0x58, 0xbd, 0x62, 0xa1, 0xb1, 0x34, 0x5e, - 0x8e, 0x15, 0x06, 0xdd, 0x2d, 0x9c, 0x69, 0xa6, 0x19, 0xa5, 0x70, 0x31, 0x11, 0xd0, 0xfe, 0x18, - 0x25, 0x3a, 0x03, 0x95, 0x4e, 0x44, 0x42, 0xf9, 0xa0, 0xa3, 0xe4, 0xf3, 0xcc, 0xce, 0xc8, 0x20, - 0x54, 0x6c, 0xdd, 0x50, 0x26, 0x3e, 0x4d, 0x6c, 0xe5, 0x46, 0x3e, 0x0e, 0xb3, 0xbf, 0x5c, 0x86, - 0x89, 0x94, 0x43, 0x2c, 0xed, 0xc8, 0x56, 0xe0, 0x7b, 0x71, 0xa0, 0xf2, 0xdb, 0xf1, 0x37, 0xe4, - 0x48, 0x7b, 0xf3, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x29, 0x5f, 0x09, 0x4e, 0xbf, 0xce, 0xb1, - 0x58, 0x33, 0x1e, 0x0a, 0x2e, 0xfa, 0xb2, 0xce, 0xe3, 0x50, 0x69, 0x07, 0xea, 0xd1, 0x77, 0x35, - 0x9f, 0x78, 0xb1, 0x56, 0x0f, 0x82, 0x16, 0x66, 0x40, 0xf4, 0x84, 0xf8, 0xfa, 0xd4, 0xcd, 0x08, - 0x76, 0xdc, 0x20, 0xd2, 0x86, 0xe0, 0x29, 0x18, 0xba, 0x4b, 0x76, 0x42, 0xcf, 0xdf, 0x48, 0xdf, - 0x0b, 0x5d, 0xe1, 0xc5, 0x58, 0xc2, 0xcd, 0x64, 0xf5, 0x43, 0x47, 0xfc, 0x7a, 0xce, 0x70, 0xee, - 0x39, 0xf8, 0x4d, 0x0b, 0x26, 0x58, 0xf6, 0x59, 0x91, 0x22, 0xc1, 0x0b, 0xfc, 0x63, 0x90, 0x31, - 0x1e, 0x87, 0x81, 0x90, 0x36, 0x9a, 0x7e, 0xfe, 0x82, 0xf5, 0x04, 0x73, 0x18, 0x7a, 0x04, 0x2a, - 0xac, 
0x0b, 0x74, 0x1a, 0x47, 0x79, 0x92, 0xfb, 0x9a, 0x13, 0x3b, 0x98, 0x95, 0xb2, 0x18, 0x34, - 0x4c, 0xda, 0x2d, 0x8f, 0x77, 0x3a, 0x31, 0xe7, 0x7e, 0xd0, 0x62, 0xd0, 0x32, 0x3b, 0xf9, 0xa0, - 0x62, 0xd0, 0xb2, 0x89, 0x1f, 0x2c, 0xe7, 0xff, 0xf7, 0x12, 0x9c, 0xce, 0xac, 0x97, 0xdc, 0x30, - 0xaf, 0x18, 0x37, 0xcc, 0x17, 0x53, 0x37, 0xcc, 0xf6, 0xc1, 0xb5, 0x1f, 0xcc, 0x9d, 0x73, 0xf6, - 0x55, 0x70, 0xf9, 0x18, 0xaf, 0x82, 0x2b, 0x45, 0x45, 0x9c, 0x81, 0x1c, 0x11, 0xe7, 0x8f, 0x2c, - 0x78, 0x38, 0x73, 0xc8, 0x3e, 0x70, 0x41, 0x7f, 0x99, 0xbd, 0xec, 0xa1, 0x9d, 0xfc, 0x5a, 0xb9, - 0xc7, 0x57, 0x31, 0x3d, 0xe5, 0x2c, 0xe5, 0x42, 0x0c, 0x18, 0x09, 0xe1, 0x6d, 0x94, 0x73, 0x20, - 0x5e, 0x86, 0x15, 0x14, 0x45, 0x5a, 0xd0, 0x1c, 0xef, 0xe4, 0xf2, 0x21, 0x37, 0xd4, 0xbc, 0x69, - 0x87, 0xd7, 0xf3, 0x3e, 0xa4, 0x43, 0xe9, 0x6e, 0x6b, 0x9a, 0x67, 0xf9, 0x30, 0x9a, 0xe7, 0x68, - 0xb6, 0xd6, 0x89, 0x16, 0x60, 0x62, 0xcb, 0xf3, 0xd9, 0xa3, 0xbb, 0xa6, 0xf4, 0xa4, 0x22, 0x97, - 0xaf, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7d, 0x19, 0xc6, 0x0e, 0x6f, 0x5d, 0xfb, 0x51, 0x19, 0x3e, - 0x7c, 0x00, 0x53, 0xe0, 0xa7, 0x83, 0x31, 0x2f, 0xda, 0xe9, 0xd0, 0x35, 0x37, 0x75, 0x98, 0x5e, - 0xef, 0xb4, 0x5a, 0x3b, 0xcc, 0x3f, 0x8b, 0xb8, 0x12, 0x43, 0x08, 0x35, 0x2a, 0x19, 0xf5, 0x4a, - 0x06, 0x0e, 0xce, 0xac, 0x89, 0x7e, 0x1e, 0x50, 0x70, 0x87, 0xa5, 0x45, 0x76, 0x93, 0xbc, 0x16, - 0x6c, 0x0a, 0xca, 0xc9, 0x56, 0xbd, 0xd1, 0x85, 0x81, 0x33, 0x6a, 0x51, 0x39, 0x95, 0x9e, 0x63, - 0x3b, 0xaa, 0x5b, 0x29, 0x39, 0x15, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x4b, 0x30, 0xe5, 0x6c, 0x3b, - 0x1e, 0x4f, 0x73, 0x26, 0x09, 0x70, 0x41, 0x55, 0xd9, 0xaf, 0x16, 0xd2, 0x08, 0xb8, 0xbb, 0x0e, - 0x6a, 0x1b, 0x06, 0x49, 0xfe, 0x32, 0xc3, 0x27, 0x0f, 0xb1, 0x82, 0x0b, 0x9b, 0x28, 0xed, 0x3f, - 0xb5, 0xe8, 0xd1, 0x97, 0xf1, 0x3e, 0x2b, 0x1d, 0x11, 0x65, 0x60, 0xd3, 0x82, 0x00, 0xd5, 0x88, - 0x2c, 0xe9, 0x40, 0x6c, 0xe2, 0xf2, 0xa5, 0x11, 0x25, 0xee, 0xe2, 0x86, 0xb4, 0x29, 0xe2, 0x67, - 0x15, 0x06, 0x95, 0xa0, 0x5d, 0x6f, 0xdb, 
0x8b, 0x82, 0x50, 0x6c, 0xa0, 0x7e, 0x5f, 0x41, 0x57, - 0xfc, 0xb2, 0xc6, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x4a, 0x09, 0xc6, 0x64, 0x8b, 0xaf, 0x76, 0x82, - 0xd8, 0x39, 0x86, 0x23, 0xfd, 0x55, 0xe3, 0x48, 0x3f, 0x5f, 0x2c, 0x9c, 0x98, 0x75, 0xae, 0xe7, - 0x51, 0xfe, 0xe9, 0xd4, 0x51, 0x7e, 0xa1, 0x1f, 0xa2, 0x07, 0x1f, 0xe1, 0xff, 0xc6, 0x82, 0x29, - 0x03, 0xff, 0x18, 0x4e, 0x92, 0xba, 0x79, 0x92, 0x3c, 0xd3, 0xc7, 0xd7, 0xf4, 0x38, 0x41, 0xbe, - 0x5e, 0x4a, 0x7d, 0x05, 0x3b, 0x39, 0x7e, 0x01, 0x2a, 0x9b, 0x4e, 0xe8, 0x16, 0xcb, 0xf9, 0xd9, - 0x55, 0x7d, 0xfe, 0xb2, 0x13, 0xba, 0x9c, 0xff, 0x9f, 0x53, 0xaf, 0xc7, 0x39, 0xa1, 0x9b, 0x1b, - 0x45, 0xc1, 0x1a, 0x45, 0x2f, 0xc1, 0x60, 0xd4, 0x0c, 0xda, 0xca, 0xcf, 0xf4, 0x0c, 0x7f, 0x59, - 0x8e, 0x96, 0xec, 0xef, 0xce, 0x21, 0xb3, 0x39, 0x5a, 0x8c, 0x05, 0xfe, 0xec, 0x06, 0x54, 0x55, - 0xd3, 0x47, 0xea, 0x69, 0xff, 0x5f, 0xcb, 0x70, 0x22, 0x63, 0xad, 0xa0, 0x5f, 0x34, 0xc6, 0xed, - 0xe5, 0xbe, 0x17, 0xdb, 0xfb, 0x1c, 0xb9, 0x5f, 0x64, 0x9a, 0x92, 0x2b, 0x56, 0xc7, 0x21, 0x9a, - 0xbf, 0x19, 0x91, 0x74, 0xf3, 0xb4, 0x28, 0xbf, 0x79, 0xda, 0xec, 0xb1, 0x0d, 0x3f, 0x6d, 0x48, - 0xf5, 0xf4, 0x48, 0xe7, 0xf9, 0x0b, 0x15, 0x98, 0xce, 0xca, 0x5b, 0x80, 0x7e, 0xc5, 0x4a, 0xbd, - 0x30, 0xf2, 0x4a, 0xff, 0xc9, 0x0f, 0xf8, 0xb3, 0x23, 0x22, 0xab, 0xd0, 0xbc, 0xf9, 0xe6, 0x48, - 0xee, 0x88, 0x8b, 0xd6, 0x59, 0xfc, 0x53, 0xc8, 0x5f, 0x8b, 0x91, 0x5c, 0xe1, 0x53, 0x87, 0xe8, - 0x8a, 0x78, 0x70, 0x26, 0x4a, 0xc5, 0x3f, 0xc9, 0xe2, 0xfc, 0xf8, 0x27, 0xd9, 0x87, 0x59, 0x0f, - 0x46, 0xb4, 0xef, 0x3a, 0xd2, 0x65, 0x70, 0x97, 0x1e, 0x51, 0x5a, 0xbf, 0x8f, 0x74, 0x29, 0xfc, - 0x1d, 0x0b, 0x52, 0x4e, 0x61, 0xca, 0x2c, 0x63, 0xf5, 0x34, 0xcb, 0x9c, 0x81, 0x4a, 0x18, 0xb4, - 0x48, 0xfa, 0xd1, 0x09, 0x1c, 0xb4, 0x08, 0x66, 0x10, 0xf5, 0xa0, 0x74, 0xb9, 0xd7, 0x83, 0xd2, - 0x54, 0x4f, 0x6f, 0x91, 0x6d, 0x22, 0x8d, 0x24, 0x8a, 0x8d, 0x5f, 0xa5, 0x85, 0x98, 0xc3, 0xec, - 0xdf, 0xa9, 0xc0, 0x89, 0x8c, 0x58, 0x40, 0xaa, 0x21, 0x6d, 0x38, 0x31, 0xb9, 
0xe7, 0xec, 0xa4, - 0x93, 0xdf, 0x5e, 0xe2, 0xc5, 0x58, 0xc2, 0x99, 0x33, 0x2b, 0x4f, 0xa0, 0x97, 0x32, 0x5d, 0x89, - 0xbc, 0x79, 0x02, 0x7a, 0xf4, 0x4f, 0x0f, 0x5f, 0x04, 0x88, 0xa2, 0xd6, 0xb2, 0x4f, 0x25, 0x3c, - 0x57, 0x38, 0xcd, 0x26, 0x79, 0x17, 0x1b, 0x57, 0x05, 0x04, 0x6b, 0x58, 0xa8, 0x06, 0x93, 0xed, - 0x30, 0x88, 0xb9, 0x61, 0xb0, 0xc6, 0x1d, 0x2d, 0x06, 0xcc, 0x68, 0xad, 0x7a, 0x0a, 0x8e, 0xbb, - 0x6a, 0xa0, 0x17, 0x60, 0x44, 0x44, 0x70, 0xd5, 0x83, 0xa0, 0x25, 0xcc, 0x48, 0xea, 0x3a, 0xbe, - 0x91, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0x63, 0xd6, 0xc6, 0xa1, 0xcc, 0x6a, 0xdc, 0xe2, 0xa8, 0xe1, - 0xa5, 0xb2, 0x9b, 0x0c, 0x17, 0xca, 0x6e, 0x92, 0x18, 0xd6, 0xaa, 0x85, 0x2f, 0x62, 0x20, 0xd7, - 0x00, 0xf5, 0x87, 0x65, 0x18, 0xe4, 0x53, 0x71, 0x0c, 0x52, 0x5e, 0x5d, 0x98, 0x94, 0x0a, 0x65, - 0x92, 0xe0, 0xbd, 0x9a, 0xaf, 0x39, 0xb1, 0xc3, 0x59, 0x93, 0xda, 0x21, 0x89, 0x19, 0x0a, 0xcd, - 0x1b, 0x7b, 0x68, 0x36, 0x65, 0x29, 0x01, 0x4e, 0x43, 0xdb, 0x51, 0x9b, 0x00, 0x11, 0x7b, 0xfe, - 0x96, 0xd2, 0x10, 0x99, 0x79, 0x9f, 0x2f, 0xd4, 0x8f, 0x86, 0xaa, 0xc6, 0x7b, 0x93, 0x2c, 0x4b, - 0x05, 0xc0, 0x1a, 0xed, 0xd9, 0x17, 0xa1, 0xaa, 0x90, 0xf3, 0x54, 0xc8, 0x51, 0x9d, 0xb5, 0xfd, - 0x2c, 0x4c, 0xa4, 0xda, 0xea, 0x4b, 0x03, 0xfd, 0x3d, 0x0b, 0x26, 0x78, 0x97, 0x97, 0xfd, 0x6d, - 0xc1, 0x0a, 0x3e, 0x67, 0xc1, 0x74, 0x2b, 0x63, 0x27, 0x8a, 0x69, 0x3e, 0xcc, 0x1e, 0x56, 0xca, - 0x67, 0x16, 0x14, 0x67, 0xb6, 0x86, 0xce, 0xc2, 0x30, 0x7f, 0xcd, 0xdb, 0x69, 0x09, 0x0f, 0xed, - 0x51, 0x9e, 0x93, 0x9c, 0x97, 0x61, 0x05, 0xb5, 0x7f, 0x6c, 0xc1, 0x14, 0xff, 0x88, 0x2b, 0x64, - 0x47, 0xa9, 0x57, 0x1f, 0x90, 0xcf, 0x10, 0xd9, 0xd7, 0x4b, 0x3d, 0xb2, 0xaf, 0xeb, 0x5f, 0x59, - 0x3e, 0xf0, 0x2b, 0xbf, 0x61, 0x81, 0x58, 0xa1, 0xc7, 0xa0, 0x3f, 0xac, 0x9a, 0xfa, 0xc3, 0x47, - 0x8a, 0x2c, 0xfa, 0x1e, 0x8a, 0xc3, 0xaf, 0x96, 0x60, 0x92, 0x23, 0x24, 0x37, 0x32, 0x1f, 0x94, - 0xc9, 0xe9, 0xef, 0x55, 0x20, 0xf5, 0x26, 0x6c, 0xf6, 0x97, 0x1a, 0x73, 0x59, 0x39, 0x70, 0x2e, - 0xff, 0xa7, 0x05, 
0x88, 0x8f, 0x49, 0xfa, 0x29, 0x74, 0x7e, 0xba, 0x69, 0xe6, 0x80, 0x84, 0x73, - 0x28, 0x08, 0xd6, 0xb0, 0x1e, 0xf0, 0x27, 0xa4, 0xee, 0xc3, 0xca, 0xf9, 0xf7, 0x61, 0x7d, 0x7c, - 0xf5, 0x77, 0xcb, 0x90, 0x76, 0xd5, 0x44, 0x6f, 0xc3, 0x68, 0xd3, 0x69, 0x3b, 0x77, 0xbc, 0x96, - 0x17, 0x7b, 0x24, 0x2a, 0x76, 0xe1, 0xbe, 0xa4, 0xd5, 0x10, 0xd7, 0x50, 0x5a, 0x09, 0x36, 0x28, - 0xa2, 0x79, 0x80, 0x76, 0xe8, 0x6d, 0x7b, 0x2d, 0xb2, 0xc1, 0x34, 0x1e, 0x16, 0xeb, 0xc1, 0xef, - 0x8e, 0x65, 0x29, 0xd6, 0x30, 0x32, 0x62, 0x03, 0xca, 0xc7, 0x11, 0x1b, 0x50, 0xe9, 0x33, 0x36, - 0x60, 0xa0, 0x50, 0x6c, 0x00, 0x86, 0x53, 0xf2, 0xf0, 0xa6, 0xff, 0x57, 0xbc, 0x16, 0x11, 0xb2, - 0x1b, 0x8f, 0x05, 0x99, 0xdd, 0xdb, 0x9d, 0x3b, 0x85, 0x33, 0x31, 0x70, 0x8f, 0x9a, 0x76, 0x07, - 0x4e, 0x34, 0x48, 0x28, 0x9f, 0xb1, 0x53, 0x7b, 0xe9, 0x4d, 0xa8, 0x86, 0xa9, 0x6d, 0xdc, 0x67, - 0xc0, 0xbf, 0x96, 0xe3, 0x4d, 0x6e, 0xdb, 0x84, 0xa4, 0xfd, 0xd7, 0x4b, 0x30, 0x24, 0x9c, 0x34, - 0x8f, 0x41, 0xf8, 0xb8, 0x62, 0x98, 0x98, 0x9e, 0xca, 0xe3, 0x7f, 0xac, 0x5b, 0x3d, 0x8d, 0x4b, - 0x8d, 0x94, 0x71, 0xe9, 0x99, 0x62, 0xe4, 0x0e, 0x36, 0x2b, 0xfd, 0x93, 0x32, 0x8c, 0x9b, 0x4e, - 0xab, 0xc7, 0x30, 0x2c, 0xaf, 0xc1, 0x50, 0x24, 0xfc, 0xa7, 0x4b, 0x45, 0x7c, 0xf6, 0xd2, 0x53, - 0x9c, 0xdc, 0xc4, 0x0b, 0x8f, 0x69, 0x49, 0x2e, 0xd3, 0x45, 0xbb, 0x7c, 0x2c, 0x2e, 0xda, 0x79, - 0xbe, 0xc4, 0x95, 0x07, 0xe1, 0x4b, 0x6c, 0x7f, 0x8f, 0xb1, 0x7c, 0xbd, 0xfc, 0x18, 0x8e, 0xf1, - 0x57, 0xcd, 0xc3, 0xe1, 0x5c, 0xa1, 0x75, 0x27, 0xba, 0xd7, 0xe3, 0x38, 0xff, 0x96, 0x05, 0x23, - 0x02, 0xf1, 0x18, 0x3e, 0xe0, 0xe7, 0xcd, 0x0f, 0x78, 0xa2, 0xd0, 0x07, 0xf4, 0xe8, 0xf9, 0x57, - 0x4a, 0xaa, 0xe7, 0xf5, 0x20, 0x8c, 0x0b, 0x65, 0x42, 0x1f, 0xa6, 0xaa, 0x5f, 0xd0, 0x0c, 0x5a, - 0x42, 0x80, 0x7b, 0x24, 0x09, 0xfd, 0xe3, 0xe5, 0xfb, 0xda, 0x6f, 0xac, 0xb0, 0x59, 0x64, 0x5a, - 0x10, 0xc6, 0xe2, 0x00, 0x4d, 0x22, 0xd3, 0x82, 0x30, 0xc6, 0x0c, 0x82, 0x5c, 0x80, 0xd8, 0x09, - 0x37, 0x48, 0x4c, 0xcb, 0x44, 0xd4, 0x6c, 0xef, 0xdd, 
0xda, 0x89, 0xbd, 0xd6, 0xbc, 0xe7, 0xc7, - 0x51, 0x1c, 0xce, 0xaf, 0xfa, 0xf1, 0x8d, 0x90, 0x0b, 0xfd, 0x5a, 0x2c, 0x9f, 0xa2, 0x85, 0x35, - 0xba, 0x32, 0x48, 0x84, 0xb5, 0x31, 0x60, 0xde, 0x20, 0x5d, 0x17, 0xe5, 0x58, 0x61, 0xd8, 0x2f, - 0x32, 0xce, 0xce, 0x06, 0xa8, 0xbf, 0x30, 0xbb, 0x2f, 0x0c, 0xa9, 0xa1, 0x65, 0x66, 0xe1, 0xeb, - 0x7a, 0x30, 0x5f, 0x51, 0xf6, 0x49, 0xbb, 0xa0, 0xfb, 0x51, 0x27, 0xb1, 0x7f, 0x88, 0x74, 0x5d, - 0x3b, 0xbe, 0x58, 0x98, 0x23, 0xf7, 0x71, 0xd1, 0xc8, 0x52, 0x32, 0xb2, 0x3c, 0x74, 0xab, 0xf5, - 0x74, 0xfe, 0xfa, 0x25, 0x09, 0xc0, 0x09, 0x0e, 0x3a, 0x2f, 0x14, 0x4a, 0x6e, 0x71, 0xf9, 0x70, - 0x4a, 0xa1, 0x94, 0x43, 0xa2, 0x69, 0x94, 0x17, 0x60, 0x44, 0x3d, 0x09, 0x54, 0xe7, 0x8f, 0xb1, - 0x54, 0xb9, 0x7c, 0xb5, 0x9c, 0x14, 0x63, 0x1d, 0x07, 0xad, 0xc1, 0x44, 0xc4, 0xdf, 0x2b, 0x92, - 0xd1, 0x1a, 0xc2, 0x70, 0xf0, 0xb4, 0xbc, 0xa4, 0x6c, 0x98, 0xe0, 0x7d, 0x56, 0xc4, 0xb7, 0xb2, - 0x8c, 0xef, 0x48, 0x93, 0x40, 0xaf, 0xc0, 0x78, 0x4b, 0x7f, 0xc3, 0xb5, 0x2e, 0xec, 0x0a, 0xca, - 0xed, 0xcc, 0x78, 0xe1, 0xb5, 0x8e, 0x53, 0xd8, 0xe8, 0x35, 0x98, 0xd1, 0x4b, 0x44, 0x72, 0x21, - 0xc7, 0xdf, 0x20, 0x91, 0x78, 0xdb, 0xe4, 0x91, 0xbd, 0xdd, 0xb9, 0x99, 0xab, 0x3d, 0x70, 0x70, - 0xcf, 0xda, 0xe8, 0x25, 0x18, 0x95, 0x9f, 0xaf, 0xc5, 0x36, 0x25, 0x0e, 0x8f, 0x1a, 0x0c, 0x1b, - 0x98, 0xe8, 0x1e, 0x9c, 0x94, 0xff, 0xd7, 0x42, 0x67, 0x7d, 0xdd, 0x6b, 0x8a, 0x20, 0xb3, 0x11, - 0x46, 0x62, 0x41, 0xfa, 0x8b, 0x2f, 0x67, 0x21, 0xed, 0xef, 0xce, 0x9d, 0x11, 0xa3, 0x96, 0x09, - 0x67, 0x93, 0x98, 0x4d, 0x1f, 0x5d, 0x83, 0x13, 0x9b, 0xc4, 0x69, 0xc5, 0x9b, 0x4b, 0x9b, 0xa4, - 0x79, 0x57, 0x6e, 0x2c, 0x16, 0x31, 0xa5, 0xb9, 0x04, 0x5e, 0xee, 0x46, 0xc1, 0x59, 0xf5, 0xde, - 0xdf, 0x9d, 0xf2, 0x2f, 0xd0, 0xca, 0x9a, 0xfc, 0x80, 0xde, 0x81, 0x51, 0x7d, 0xac, 0xd3, 0x82, - 0x41, 0xfe, 0xfb, 0xbe, 0x42, 0x0e, 0x51, 0x33, 0xa0, 0xc3, 0xb0, 0x41, 0xdb, 0xfe, 0x77, 0x25, - 0x98, 0xcb, 0xc9, 0xdd, 0x95, 0xb2, 0x66, 0x59, 0x85, 0xac, 0x59, 0x0b, 0xf2, 0xcd, 0x9b, 
0xeb, - 0xa9, 0x9c, 0xe9, 0xa9, 0x57, 0x6c, 0x92, 0xcc, 0xe9, 0x69, 0xfc, 0xc2, 0x9e, 0x66, 0xba, 0x41, - 0xac, 0x92, 0xeb, 0x70, 0xf7, 0xba, 0x6e, 0xe3, 0x1c, 0x38, 0x8c, 0xd0, 0xdb, 0xd3, 0xbc, 0x69, - 0x7f, 0xaf, 0x04, 0x27, 0xd5, 0x60, 0xfe, 0xf4, 0x0e, 0xe1, 0x5b, 0xdd, 0x43, 0xf8, 0x40, 0xcd, - 0xc4, 0xf6, 0x0d, 0x18, 0x6c, 0xec, 0x44, 0xcd, 0xb8, 0x55, 0xe0, 0xc4, 0x7f, 0xdc, 0xd8, 0x57, - 0xc9, 0x69, 0xc4, 0x5e, 0xb2, 0x13, 0xdb, 0xcc, 0xfe, 0xbc, 0x05, 0x13, 0x6b, 0x4b, 0xf5, 0x46, - 0xd0, 0xbc, 0x4b, 0xe2, 0x05, 0x6e, 0xd0, 0xc0, 0xe2, 0xc0, 0xb7, 0x0e, 0x79, 0x90, 0x67, 0x89, - 0x08, 0x67, 0xa0, 0xb2, 0x19, 0x44, 0x71, 0xfa, 0x52, 0xe0, 0x72, 0x10, 0xc5, 0x98, 0x41, 0xec, - 0x3f, 0xb3, 0x60, 0x80, 0x3d, 0xd4, 0x96, 0xf7, 0xc8, 0x5f, 0x91, 0xef, 0x42, 0x2f, 0xc0, 0x20, - 0x59, 0x5f, 0x27, 0xcd, 0x58, 0xcc, 0xaf, 0x0c, 0xb0, 0x19, 0x5c, 0x66, 0xa5, 0xf4, 0x44, 0x63, - 0x8d, 0xf1, 0xbf, 0x58, 0x20, 0xa3, 0xcf, 0x40, 0x35, 0xf6, 0xb6, 0xc8, 0x82, 0xeb, 0x0a, 0x2b, - 0x7c, 0x7f, 0x3e, 0x5f, 0xea, 0x84, 0x5d, 0x93, 0x44, 0x70, 0x42, 0xcf, 0xfe, 0x52, 0x09, 0x20, - 0x09, 0x9f, 0xcb, 0xfb, 0xcc, 0xc5, 0xae, 0xb7, 0x0c, 0x9f, 0xcc, 0x78, 0xcb, 0x10, 0x25, 0x04, - 0x33, 0x5e, 0x32, 0x54, 0x43, 0x55, 0x2e, 0x34, 0x54, 0x95, 0x7e, 0x86, 0x6a, 0x09, 0xa6, 0x92, - 0xf0, 0x3f, 0x33, 0x8e, 0x9a, 0xe5, 0x1b, 0x5e, 0x4b, 0x03, 0x71, 0x37, 0xbe, 0xfd, 0x25, 0x0b, - 0x84, 0x97, 0x70, 0x81, 0x05, 0xed, 0xca, 0x77, 0xc7, 0x8c, 0xd4, 0x82, 0x4f, 0x17, 0x71, 0xa0, - 0x16, 0x09, 0x05, 0x15, 0xdf, 0x37, 0xd2, 0x08, 0x1a, 0x54, 0xed, 0xdf, 0xb6, 0x60, 0x84, 0x83, - 0xaf, 0x31, 0x45, 0x34, 0xbf, 0x5f, 0x7d, 0x25, 0xb3, 0x66, 0x4f, 0x72, 0x51, 0xc2, 0x2a, 0xa9, - 0xb1, 0xfe, 0x24, 0x97, 0x04, 0xe0, 0x04, 0x07, 0x3d, 0x05, 0x43, 0x51, 0xe7, 0x0e, 0x43, 0x4f, - 0xb9, 0x0c, 0x37, 0x78, 0x31, 0x96, 0x70, 0xfb, 0x9f, 0x95, 0x60, 0x32, 0xed, 0x31, 0x8e, 0x30, - 0x0c, 0x72, 0x06, 0x92, 0xd6, 0x69, 0x0e, 0x32, 0x80, 0x6a, 0x1e, 0xe7, 0xc0, 0x1f, 0x96, 0x67, - 0x2c, 0x48, 0x50, 0x42, 0xeb, 
0x30, 0xe2, 0x06, 0xf7, 0xfc, 0x7b, 0x4e, 0xe8, 0x2e, 0xd4, 0x57, - 0xc5, 0x4c, 0xe4, 0xf8, 0xf8, 0xd5, 0x92, 0x0a, 0xba, 0x3f, 0x3b, 0x33, 0xc8, 0x25, 0x20, 0xac, - 0x13, 0x46, 0x6f, 0xb2, 0x4c, 0x28, 0xeb, 0xde, 0xc6, 0x35, 0xa7, 0x5d, 0xcc, 0x9b, 0x65, 0x49, - 0xa2, 0x6b, 0x6d, 0x8c, 0x89, 0xc4, 0x29, 0x1c, 0x80, 0x13, 0x92, 0xf6, 0xaf, 0x9e, 0x04, 0x63, - 0x2d, 0x18, 0x19, 0xa7, 0xad, 0x07, 0x9e, 0x71, 0xfa, 0x0d, 0x18, 0x26, 0x5b, 0xed, 0x78, 0xa7, - 0xe6, 0x85, 0xc5, 0xde, 0x0f, 0x58, 0x16, 0xd8, 0xdd, 0xd4, 0x25, 0x04, 0x2b, 0x8a, 0x3d, 0xf2, - 0x87, 0x97, 0x3f, 0x10, 0xf9, 0xc3, 0x2b, 0x7f, 0x29, 0xf9, 0xc3, 0x5f, 0x83, 0xa1, 0x0d, 0x2f, - 0xc6, 0xa4, 0x1d, 0x88, 0xd3, 0x38, 0x67, 0xf1, 0x5c, 0xe2, 0xc8, 0xdd, 0x99, 0x65, 0x05, 0x00, - 0x4b, 0x72, 0x68, 0x4d, 0x6d, 0xaa, 0xc1, 0x22, 0x32, 0x68, 0xb7, 0x81, 0x3c, 0x73, 0x5b, 0x89, - 0x7c, 0xe1, 0x43, 0xef, 0x3f, 0x5f, 0xb8, 0xca, 0xf2, 0x3d, 0xfc, 0xa0, 0xb2, 0x7c, 0x1b, 0xd9, - 0xd2, 0xab, 0x47, 0x91, 0x2d, 0xfd, 0x4b, 0x16, 0x9c, 0x6c, 0x67, 0xbd, 0x35, 0x20, 0xf2, 0x75, - 0xff, 0xdc, 0x21, 0x5e, 0x5f, 0x30, 0x9a, 0x66, 0xf9, 0x3d, 0x32, 0xd1, 0x70, 0x76, 0xc3, 0x32, - 0xed, 0xfa, 0xc8, 0xfb, 0x4f, 0xbb, 0x7e, 0xd4, 0x89, 0xbd, 0x93, 0x24, 0xec, 0x63, 0x47, 0x92, - 0x84, 0x7d, 0xfc, 0x01, 0x26, 0x61, 0xd7, 0xd2, 0xa7, 0x4f, 0x3c, 0xd8, 0xf4, 0xe9, 0x9b, 0xe6, - 0xb9, 0xc4, 0xb3, 0x75, 0xbf, 0x50, 0xf8, 0x5c, 0x32, 0x5a, 0x38, 0xf8, 0x64, 0xe2, 0x89, 0xe4, - 0xa7, 0xde, 0x67, 0x22, 0x79, 0x23, 0x1d, 0x3b, 0x3a, 0x8a, 0x74, 0xec, 0x6f, 0xeb, 0x27, 0xe8, - 0x89, 0x22, 0x2d, 0xa8, 0x83, 0xb2, 0xbb, 0x85, 0xac, 0x33, 0xb4, 0x3b, 0xe1, 0xfb, 0xf4, 0x71, - 0x27, 0x7c, 0x3f, 0x79, 0x84, 0x09, 0xdf, 0x4f, 0x1d, 0x6b, 0xc2, 0xf7, 0x87, 0x3e, 0x20, 0x09, - 0xdf, 0x67, 0x8e, 0x2b, 0xe1, 0xfb, 0xc3, 0x0f, 0x36, 0xe1, 0xfb, 0xdb, 0x50, 0x6d, 0xcb, 0xb8, - 0xcb, 0x99, 0xd9, 0x22, 0x53, 0x97, 0x19, 0xa6, 0xc9, 0xa7, 0x4e, 0x81, 0x70, 0x42, 0x94, 0xb6, - 0x90, 0x24, 0x80, 0xff, 0x70, 0x91, 0x16, 0x32, 0xed, 0x1e, 0x07, 
0xa4, 0x7d, 0xff, 0x42, 0x09, - 0x4e, 0x1f, 0xbc, 0x3b, 0x12, 0xa3, 0x49, 0x3d, 0xb1, 0x65, 0xa7, 0x8c, 0x26, 0x4c, 0xf2, 0xd4, - 0xb0, 0x0a, 0x87, 0xb3, 0x5f, 0x82, 0x29, 0xe5, 0xe7, 0xd5, 0xf2, 0x9a, 0x3b, 0xda, 0x33, 0x54, - 0x2a, 0x3e, 0xa1, 0x91, 0x46, 0xc0, 0xdd, 0x75, 0xd0, 0x02, 0x4c, 0x18, 0x85, 0xab, 0x35, 0xa1, - 0xbf, 0x28, 0x2b, 0x4d, 0xc3, 0x04, 0xe3, 0x34, 0xbe, 0xfd, 0x75, 0x0b, 0x1e, 0xea, 0x91, 0xe1, - 0xb5, 0x70, 0x8c, 0x76, 0x1b, 0x26, 0xda, 0x66, 0xd5, 0xc2, 0x29, 0x1f, 0x8c, 0x8c, 0xb2, 0xaa, - 0xd7, 0x29, 0x00, 0x4e, 0x93, 0x5f, 0x3c, 0xfb, 0xfd, 0x1f, 0x9d, 0xfe, 0xd0, 0x0f, 0x7e, 0x74, - 0xfa, 0x43, 0x3f, 0xfc, 0xd1, 0xe9, 0x0f, 0xfd, 0xd2, 0xde, 0x69, 0xeb, 0xfb, 0x7b, 0xa7, 0xad, - 0x1f, 0xec, 0x9d, 0xb6, 0x7e, 0xb8, 0x77, 0xda, 0xfa, 0xf3, 0xbd, 0xd3, 0xd6, 0x97, 0x7e, 0x7c, - 0xfa, 0x43, 0xaf, 0x97, 0xb6, 0x2f, 0xfc, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x06, 0xe5, 0xd7, - 0x49, 0x99, 0xd0, 0x00, 0x00, + 0x1a, 0x0c, 0xad, 0xf3, 0xa8, 0x5a, 0x91, 0xc8, 0xf7, 0x42, 0x5e, 0xe8, 0x6f, 0x57, 0x08, 0x2e, + 0x0f, 0x49, 0x11, 0x08, 0x2c, 0xd9, 0xb1, 0x1c, 0x87, 0x2a, 0x4e, 0x58, 0x64, 0xf2, 0x9d, 0xef, + 0x2f, 0xae, 0x58, 0x9c, 0x7e, 0x15, 0x14, 0x6b, 0x1c, 0xe9, 0x9c, 0x77, 0xe4, 0x1b, 0x39, 0x2c, + 0x8b, 0x6f, 0xee, 0x9c, 0xcf, 0x7c, 0x52, 0x87, 0xcf, 0x79, 0x85, 0xc2, 0x09, 0x53, 0xd4, 0x81, + 0xb1, 0xed, 0xa8, 0xbd, 0x49, 0xe4, 0xd2, 0x67, 0xa9, 0x7d, 0x47, 0x2e, 0x7e, 0x32, 0x27, 0x5f, + 0xb3, 0x28, 0xe2, 0x85, 0x71, 0xc7, 0x69, 0x75, 0x49, 0x30, 0x96, 0x54, 0xee, 0x96, 0xce, 0x16, + 0x9b, 0xb5, 0xd0, 0x21, 0x79, 0xb7, 0x13, 0xdc, 0xd9, 0x89, 0x89, 0x48, 0xfd, 0x9b, 0x33, 0x24, + 0xaf, 0x72, 0xe2, 0xee, 0x21, 0x11, 0x08, 0x2c, 0xd9, 0xa9, 0x2e, 0x63, 0xd2, 0x78, 0xb2, 0x70, + 0x97, 0x75, 0x7d, 0x43, 0xd2, 0x65, 0x4c, 0xfa, 0x26, 0x4c, 0x99, 0xd4, 0x6d, 0x6f, 0x06, 0x71, + 0xe0, 0xa7, 0x64, 0xff, 0x54, 0x11, 0xa9, 0x5b, 0xcf, 0x28, 0xd9, 0x2d, 0x75, 0xb3, 0xa8, 0x70, + 0x66, 0xad, 0xc8, 0x87, 0xf1, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xce, 
0x43, 0x54, 0xe8, 0x8c, + 0x68, 0x94, 0x11, 0x75, 0x33, 0xcf, 0x63, 0x13, 0x83, 0x53, 0xdc, 0xe9, 0xd0, 0x45, 0x4d, 0xa7, + 0x45, 0x56, 0x6f, 0xcc, 0x9c, 0x28, 0x32, 0x74, 0x0d, 0x4e, 0xdc, 0x3d, 0x74, 0x02, 0x81, 0x25, + 0x3b, 0x2a, 0xeb, 0x58, 0x1e, 0x7b, 0x96, 0xc9, 0x38, 0x57, 0xd6, 0x75, 0x79, 0xe7, 0x72, 0x59, + 0xc7, 0xc0, 0x98, 0x33, 0x42, 0xef, 0x40, 0x55, 0x28, 0xb7, 0x41, 0x34, 0x73, 0x92, 0x71, 0xfd, + 0xd9, 0x9c, 0xd6, 0x72, 0xf2, 0x1b, 0x8d, 0xec, 0x5d, 0x5f, 0x44, 0xff, 0x49, 0x22, 0x9c, 0xb0, + 0xb7, 0x7f, 0x63, 0xb0, 0x5b, 0xed, 0x61, 0x07, 0x9b, 0xbf, 0xd1, 0x7d, 0x63, 0xfd, 0xa9, 0xfe, + 0xcf, 0xef, 0x0f, 0xf0, 0xee, 0xfa, 0x8b, 0x16, 0x9c, 0x6a, 0x67, 0x7e, 0x9e, 0x50, 0x1c, 0xfa, + 0x35, 0x03, 0xf0, 0xae, 0x51, 0xf9, 0xc5, 0xb3, 0xf1, 0xb8, 0x47, 0x9d, 0xe9, 0xa3, 0x40, 0xf9, + 0x7d, 0x1f, 0x05, 0x6e, 0xc3, 0x30, 0xd3, 0x5d, 0x93, 0xfc, 0x3e, 0x7d, 0xa6, 0xc2, 0x61, 0x2a, + 0xc8, 0x92, 0x60, 0x81, 0x15, 0x33, 0xda, 0x71, 0x8f, 0xa6, 0x3f, 0x02, 0x13, 0x86, 0x16, 0x59, + 0x2d, 0xf9, 0x39, 0x6b, 0x45, 0xf4, 0xc4, 0xa3, 0xf5, 0x83, 0x88, 0xf7, 0xf3, 0x08, 0xf0, 0xc1, + 0x95, 0xa1, 0x5a, 0xc6, 0x41, 0x6f, 0xd0, 0xbc, 0x9e, 0xca, 0x3f, 0xec, 0x1d, 0xef, 0x01, 0xe5, + 0x1f, 0x5a, 0x19, 0xfa, 0x34, 0x3f, 0x54, 0x7e, 0xd2, 0x3c, 0x54, 0x3e, 0x99, 0x3e, 0x54, 0x76, + 0x99, 0x92, 0x8c, 0xf3, 0x64, 0xf1, 0xac, 0xbc, 0x45, 0x13, 0x18, 0xd9, 0x2d, 0x38, 0x93, 0x27, + 0xac, 0x99, 0xcb, 0x9a, 0xab, 0x2e, 0x6b, 0x13, 0x97, 0x35, 0x77, 0xb5, 0x86, 0x19, 0xa6, 0x68, + 0x0e, 0x0c, 0xfb, 0x97, 0x4b, 0x50, 0xae, 0x07, 0xee, 0x31, 0x98, 0xc6, 0x2e, 0x19, 0xa6, 0xb1, + 0x27, 0x72, 0x1f, 0x89, 0xec, 0x69, 0x08, 0xbb, 0x91, 0x32, 0x84, 0xfd, 0x4c, 0x3e, 0xab, 0x83, + 0xcd, 0x5e, 0xdf, 0x2e, 0x83, 0xfe, 0xcc, 0x25, 0xfa, 0x0f, 0x87, 0xf1, 0x64, 0x2e, 0x17, 0x7b, + 0xf9, 0x52, 0xd4, 0xc1, 0x3c, 0xde, 0x64, 0x20, 0xe6, 0x4f, 0xac, 0x43, 0xf3, 0x6d, 0xe2, 0x6d, + 0x6c, 0xc6, 0xc4, 0x4d, 0x7f, 0xd8, 0xf1, 0x39, 0x34, 0xff, 0x85, 0x05, 0x13, 0xa9, 0xda, 0x51, + 0x2b, 0x2b, 
0x82, 0xeb, 0x90, 0xc6, 0xae, 0xa9, 0xdc, 0x90, 0xaf, 0x79, 0x00, 0x75, 0x67, 0x21, + 0x0d, 0x4a, 0x4c, 0xb7, 0x56, 0x97, 0x1a, 0x11, 0xd6, 0x28, 0xd0, 0x0b, 0x30, 0x12, 0x07, 0xed, + 0xa0, 0x15, 0x6c, 0xec, 0x5c, 0x21, 0x32, 0x3b, 0x8b, 0xba, 0x59, 0x5a, 0x4b, 0x50, 0x58, 0xa7, + 0xb3, 0xbf, 0x53, 0x86, 0xf4, 0x23, 0xa9, 0xff, 0x7f, 0x9e, 0xfe, 0xe4, 0xcc, 0xd3, 0x3f, 0xb6, + 0x60, 0x92, 0xd6, 0xce, 0x5c, 0x8c, 0xa4, 0xe3, 0xb1, 0x7a, 0x22, 0xc4, 0x3a, 0xe0, 0x89, 0x90, + 0x27, 0xa9, 0xb4, 0x73, 0x83, 0x4e, 0x2c, 0x4c, 0x60, 0x9a, 0x10, 0xa3, 0x50, 0x2c, 0xb0, 0x82, + 0x8e, 0x84, 0xa1, 0x88, 0xd0, 0xd2, 0xe9, 0x48, 0x18, 0x62, 0x81, 0x95, 0x2f, 0x88, 0x54, 0x7a, + 0xbc, 0x20, 0xc2, 0xf2, 0x9b, 0x09, 0xb7, 0x16, 0xa1, 0x56, 0x68, 0xf9, 0xcd, 0xa4, 0xbf, 0x4b, + 0x42, 0x63, 0x7f, 0xbd, 0x0c, 0xa3, 0xf5, 0xc0, 0x4d, 0x22, 0x0a, 0x9e, 0x37, 0x22, 0x0a, 0xce, + 0xa4, 0x22, 0x0a, 0x26, 0x75, 0xda, 0x07, 0x13, 0x50, 0x20, 0xf2, 0xe0, 0xb1, 0x37, 0x6e, 0x0e, + 0x19, 0x4c, 0x60, 0xe4, 0xc1, 0x53, 0x8c, 0xb0, 0xc9, 0xf7, 0xa7, 0x29, 0x88, 0xe0, 0x7f, 0x5b, + 0x30, 0x5e, 0x0f, 0x5c, 0x3a, 0x41, 0x7f, 0x9a, 0x66, 0xa3, 0x9e, 0x3d, 0x6f, 0xf0, 0x80, 0xec, + 0x79, 0xff, 0xcc, 0x82, 0xa1, 0x7a, 0xe0, 0x1e, 0x83, 0x79, 0x78, 0xc5, 0x34, 0x0f, 0x3f, 0x96, + 0x2b, 0x79, 0x7b, 0x58, 0x84, 0xbf, 0x59, 0x86, 0x31, 0xda, 0xe2, 0x60, 0x43, 0x8e, 0x97, 0xd1, + 0x37, 0x56, 0x81, 0xbe, 0xa1, 0x2a, 0x61, 0xd0, 0x6a, 0x05, 0xf7, 0xd2, 0x63, 0xb7, 0xc2, 0xa0, + 0x58, 0x60, 0xd1, 0x39, 0x18, 0x6e, 0x87, 0x64, 0xdb, 0x0b, 0x3a, 0x51, 0x3a, 0xda, 0xb3, 0x2e, + 0xe0, 0x58, 0x51, 0xa0, 0xe7, 0x61, 0x34, 0xf2, 0xfc, 0x26, 0x91, 0x4e, 0x2f, 0x15, 0xe6, 0xf4, + 0xc2, 0x13, 0x95, 0x6a, 0x70, 0x6c, 0x50, 0xa1, 0xdb, 0x50, 0x65, 0xff, 0xd9, 0x0a, 0xea, 0xff, + 0x09, 0x10, 0x91, 0x9f, 0x5c, 0x30, 0xc0, 0x09, 0x2f, 0x74, 0x11, 0x20, 0x96, 0xee, 0x39, 0x91, + 0x08, 0x4b, 0x56, 0x7a, 0xa9, 0x72, 0xdc, 0x89, 0xb0, 0x46, 0x85, 0x9e, 0x81, 0x6a, 0xec, 0x78, + 0xad, 0xab, 0x9e, 0x4f, 0x22, 0xe1, 0xde, 0x24, 
0x92, 0x8e, 0x0b, 0x20, 0x4e, 0xf0, 0x74, 0xbf, + 0x67, 0x41, 0xef, 0xfc, 0x79, 0xa1, 0x61, 0x46, 0xcd, 0xf6, 0xfb, 0xab, 0x0a, 0x8a, 0x35, 0x0a, + 0xfb, 0x25, 0x38, 0x59, 0x0f, 0xdc, 0x7a, 0x10, 0xc6, 0x2b, 0x41, 0x78, 0xcf, 0x09, 0x5d, 0x39, + 0x7e, 0x73, 0x32, 0xd7, 0x35, 0xdd, 0x93, 0x07, 0xb8, 0x15, 0xc1, 0xc8, 0x5d, 0xfd, 0x1c, 0xdb, + 0xf1, 0xfb, 0x0c, 0x55, 0xf9, 0x41, 0x09, 0x50, 0x9d, 0x39, 0x10, 0x19, 0xaf, 0x51, 0x6d, 0xc2, + 0x78, 0x44, 0xae, 0x7a, 0x7e, 0xe7, 0xbe, 0x60, 0x55, 0x2c, 0x36, 0xa8, 0xb1, 0xac, 0x97, 0xe1, + 0x76, 0x1a, 0x13, 0x86, 0x53, 0x7c, 0x69, 0x67, 0x86, 0x1d, 0x7f, 0x21, 0xba, 0x19, 0x91, 0x50, + 0xbc, 0xbe, 0xc4, 0x3a, 0x13, 0x4b, 0x20, 0x4e, 0xf0, 0x74, 0xf2, 0xb0, 0x3f, 0xd7, 0x03, 0x1f, + 0x07, 0x41, 0x2c, 0xa7, 0x1b, 0x7b, 0x8d, 0x43, 0x83, 0x63, 0x83, 0x0a, 0xad, 0x00, 0x8a, 0x3a, + 0xed, 0x76, 0x8b, 0xdd, 0x94, 0x3a, 0xad, 0x4b, 0x61, 0xd0, 0x69, 0x73, 0x3f, 0x72, 0xf1, 0x90, + 0x45, 0xa3, 0x0b, 0x8b, 0x33, 0x4a, 0x50, 0x61, 0xb1, 0x1e, 0xb1, 0xdf, 0x22, 0x02, 0x9e, 0x5b, + 0x5b, 0x1b, 0x0c, 0x84, 0x25, 0xce, 0xfe, 0x45, 0xb6, 0xc1, 0xb1, 0x67, 0x71, 0xe2, 0x4e, 0x48, + 0xd0, 0x16, 0x8c, 0xb5, 0xd9, 0x26, 0x16, 0x87, 0x41, 0xab, 0x45, 0xa4, 0x7e, 0x79, 0x38, 0x17, + 0x26, 0xfe, 0x10, 0x86, 0xce, 0x0e, 0x9b, 0xdc, 0xed, 0xff, 0x36, 0xce, 0x64, 0x95, 0xb8, 0xac, + 0x1e, 0x12, 0xce, 0xca, 0x42, 0x93, 0xfb, 0x48, 0x91, 0x07, 0xee, 0x92, 0x7d, 0x40, 0xb8, 0x3e, + 0x63, 0xc9, 0x05, 0x7d, 0x86, 0xb9, 0xe2, 0x73, 0x01, 0x51, 0xfc, 0xd9, 0x4e, 0x4e, 0x6f, 0xb8, + 0xe1, 0x0b, 0x16, 0x58, 0x63, 0x87, 0xae, 0xc2, 0x98, 0x78, 0x45, 0x45, 0x98, 0x29, 0xca, 0xc6, + 0x11, 0x7b, 0x0c, 0xeb, 0xc8, 0xfd, 0x34, 0x00, 0x9b, 0x85, 0xd1, 0x06, 0x3c, 0xaa, 0xbd, 0x12, + 0x96, 0xe1, 0x6e, 0xc7, 0x25, 0xcf, 0x63, 0x7b, 0xbb, 0x73, 0x8f, 0xae, 0x1d, 0x44, 0x88, 0x0f, + 0xe6, 0x83, 0x6e, 0xc0, 0x49, 0xa7, 0x19, 0x7b, 0xdb, 0xa4, 0x46, 0x1c, 0xb7, 0xe5, 0xf9, 0xc4, + 0x4c, 0x93, 0xf0, 0xf0, 0xde, 0xee, 0xdc, 0xc9, 0x85, 0x2c, 0x02, 0x9c, 0x5d, 0x0e, 
0x7d, 0x12, + 0xaa, 0xae, 0x1f, 0x89, 0x3e, 0x18, 0x34, 0x1e, 0xc5, 0xab, 0xd6, 0xae, 0x37, 0xd4, 0xf7, 0x27, + 0x7f, 0x70, 0x52, 0x00, 0xbd, 0x0b, 0xa3, 0x7a, 0xf8, 0x93, 0x78, 0x8c, 0xf1, 0xc5, 0x42, 0xe7, + 0x67, 0x23, 0x66, 0x88, 0x5b, 0xf0, 0x94, 0x5b, 0xab, 0x11, 0x4e, 0x64, 0x54, 0x81, 0x7e, 0x1e, + 0x50, 0x44, 0xc2, 0x6d, 0xaf, 0x49, 0x16, 0x9a, 0x2c, 0xbb, 0x2f, 0xb3, 0xf1, 0x0c, 0x1b, 0xf1, + 0x1d, 0xa8, 0xd1, 0x45, 0x81, 0x33, 0x4a, 0xa1, 0xcb, 0x54, 0xf2, 0xe8, 0x50, 0xe1, 0x85, 0x2c, + 0x15, 0xc3, 0x99, 0x1a, 0x69, 0x87, 0xa4, 0xe9, 0xc4, 0xc4, 0x35, 0x39, 0xe2, 0x54, 0x39, 0xba, + 0x2f, 0xa9, 0x07, 0x1c, 0xc0, 0xf4, 0x9d, 0xed, 0x7e, 0xc4, 0x81, 0x9e, 0xb3, 0x36, 0x83, 0x28, + 0xbe, 0x4e, 0xe2, 0x7b, 0x41, 0x78, 0x57, 0x64, 0x44, 0x4b, 0x52, 0x25, 0x26, 0x28, 0xac, 0xd3, + 0x51, 0x1d, 0x8a, 0x5d, 0xfd, 0xad, 0xd6, 0xd8, 0xbd, 0xca, 0x70, 0xb2, 0x76, 0x2e, 0x73, 0x30, + 0x96, 0x78, 0x49, 0xba, 0x5a, 0x5f, 0x62, 0x77, 0x24, 0x29, 0xd2, 0xd5, 0xfa, 0x12, 0x96, 0x78, + 0x14, 0x74, 0x3f, 0x3d, 0x38, 0x5e, 0xe4, 0xbe, 0xaa, 0x5b, 0x92, 0x17, 0x7c, 0x7d, 0xf0, 0x3e, + 0x4c, 0xaa, 0xe7, 0x0f, 0x79, 0xd2, 0xb8, 0x68, 0x66, 0x82, 0x4d, 0x9c, 0xc3, 0xe4, 0x9e, 0x53, + 0x76, 0xbd, 0xd5, 0x14, 0x4f, 0xdc, 0x55, 0x8b, 0x91, 0x9c, 0x63, 0x32, 0xf7, 0x51, 0x8e, 0xf3, + 0x50, 0x8d, 0x3a, 0x77, 0xdc, 0x60, 0xcb, 0xf1, 0x7c, 0x76, 0x91, 0xa1, 0x29, 0x31, 0x0d, 0x89, + 0xc0, 0x09, 0x0d, 0xaa, 0xc3, 0xb0, 0x23, 0x8e, 0x70, 0xe2, 0xc2, 0x21, 0x27, 0x0a, 0x5f, 0x1e, + 0xf8, 0xb8, 0x75, 0x55, 0xfe, 0xc3, 0x8a, 0x0b, 0x7a, 0x19, 0xc6, 0x44, 0x10, 0x99, 0x70, 0xf6, + 0x3c, 0x61, 0x06, 0x1c, 0x34, 0x74, 0x24, 0x36, 0x69, 0xd1, 0x06, 0x8c, 0x53, 0x2e, 0x89, 0x00, + 0x9c, 0x99, 0xee, 0x4f, 0x86, 0x6a, 0xe9, 0xcf, 0x75, 0x36, 0x38, 0xc5, 0x16, 0xb9, 0xf0, 0x88, + 0xd3, 0x89, 0x83, 0x2d, 0xba, 0x12, 0xcc, 0x75, 0xb2, 0x16, 0xdc, 0x25, 0x3e, 0xbb, 0x65, 0x18, + 0x5e, 0x3c, 0xb3, 0xb7, 0x3b, 0xf7, 0xc8, 0xc2, 0x01, 0x74, 0xf8, 0x40, 0x2e, 0xe8, 0x2d, 0x18, + 0x89, 0x83, 0x96, 0xf0, 
0xe1, 0x8e, 0x66, 0x4e, 0x15, 0x49, 0x42, 0xb4, 0xa6, 0x0a, 0xe8, 0x66, + 0x0c, 0xc5, 0x04, 0xeb, 0x1c, 0xd1, 0x9b, 0x7c, 0x55, 0xb2, 0x84, 0x99, 0x24, 0x9a, 0x79, 0xa8, + 0x48, 0x67, 0xa9, 0x0c, 0x9b, 0xe6, 0xf2, 0x15, 0x3c, 0xb0, 0xce, 0x70, 0xf6, 0xe7, 0x60, 0xaa, + 0x4b, 0xe4, 0xf5, 0xe5, 0xdc, 0xfa, 0x1f, 0x07, 0xa0, 0xaa, 0x2c, 0x86, 0xe8, 0xbc, 0x69, 0x1c, + 0x7e, 0x38, 0x6d, 0x1c, 0x1e, 0xa6, 0x0a, 0x9a, 0x6e, 0x0f, 0x7e, 0x33, 0xe3, 0x41, 0xfd, 0xa7, + 0x73, 0xd7, 0x78, 0xf1, 0xc8, 0x36, 0xed, 0x88, 0x57, 0x2e, 0x6c, 0x6f, 0xae, 0x1c, 0x78, 0x6a, + 0x2c, 0xf8, 0x48, 0x24, 0x3d, 0x1f, 0xb6, 0x03, 0x77, 0xb5, 0x9e, 0x7e, 0x03, 0xad, 0x4e, 0x81, + 0x98, 0xe3, 0x98, 0x5e, 0x4f, 0xf7, 0x6c, 0xa6, 0xd7, 0x0f, 0x1d, 0x52, 0xaf, 0x97, 0x0c, 0x70, + 0xc2, 0x0b, 0x6d, 0xc3, 0x54, 0xd3, 0x7c, 0xd2, 0x4e, 0xc5, 0xab, 0x3d, 0xdb, 0xc7, 0x93, 0x72, + 0x1d, 0xed, 0x45, 0x9a, 0xa5, 0x34, 0x3f, 0xdc, 0x5d, 0x05, 0x7a, 0x19, 0x86, 0xdf, 0x0d, 0x22, + 0x76, 0x6d, 0x21, 0x36, 0x2e, 0x19, 0x17, 0x34, 0xfc, 0xea, 0x8d, 0x06, 0x83, 0xef, 0xef, 0xce, + 0x8d, 0xd4, 0x03, 0x57, 0xfe, 0xc5, 0xaa, 0x00, 0xfa, 0xac, 0x05, 0x27, 0x8d, 0x75, 0xac, 0x5a, + 0x0e, 0x87, 0x69, 0xf9, 0xa3, 0xa2, 0xe6, 0x93, 0xab, 0x59, 0x3c, 0x71, 0x76, 0x55, 0xf6, 0x77, + 0xb9, 0x89, 0x54, 0x18, 0x4d, 0x48, 0xd4, 0x69, 0x1d, 0xc7, 0xeb, 0x10, 0x37, 0x0c, 0x7b, 0xce, + 0x03, 0x30, 0xd2, 0xff, 0x7b, 0x8b, 0x19, 0xe9, 0xd7, 0xc8, 0x56, 0xbb, 0xe5, 0xc4, 0xc7, 0xe1, + 0xfb, 0xfc, 0x19, 0x18, 0x8e, 0x45, 0x6d, 0xc5, 0x9e, 0xb6, 0xd0, 0x9a, 0xc7, 0x2e, 0x2f, 0xd4, + 0xc6, 0x27, 0xa1, 0x58, 0x31, 0xb4, 0xff, 0x15, 0x1f, 0x15, 0x89, 0x39, 0x06, 0x4b, 0xc4, 0x75, + 0xd3, 0x12, 0xf1, 0x54, 0xe1, 0x6f, 0xe9, 0x61, 0x91, 0xf8, 0x8e, 0xf9, 0x05, 0xec, 0x7c, 0xf2, + 0x93, 0x73, 0x8b, 0x64, 0xff, 0xba, 0x05, 0xd3, 0x59, 0xce, 0x08, 0x54, 0x81, 0xe1, 0xa7, 0x23, + 0x75, 0xbf, 0xa6, 0x7a, 0xf5, 0x96, 0x80, 0x63, 0x45, 0x51, 0x38, 0xd7, 0x7c, 0x7f, 0x29, 0xb4, + 0x6e, 0x80, 0xf9, 0x38, 0x22, 0x7a, 0x85, 0x87, 0x3a, 0x58, 
0xea, 0xf5, 0xc2, 0xfe, 0xc2, 0x1c, + 0xec, 0x6f, 0x94, 0x60, 0x9a, 0x1b, 0xb9, 0x17, 0xb6, 0x03, 0xcf, 0xad, 0x07, 0xae, 0x08, 0xfc, + 0x70, 0x61, 0xb4, 0xad, 0x1d, 0x6e, 0x8b, 0xa5, 0xe4, 0xd1, 0x8f, 0xc3, 0xc9, 0x81, 0x42, 0x87, + 0x62, 0x83, 0x2b, 0xad, 0x85, 0x6c, 0x7b, 0x4d, 0x65, 0x33, 0x2d, 0xf5, 0xbd, 0x33, 0xa8, 0x5a, + 0x96, 0x35, 0x3e, 0xd8, 0xe0, 0x7a, 0x04, 0x4f, 0xc4, 0xd8, 0x7f, 0xdf, 0x82, 0x87, 0x7a, 0xa4, + 0xed, 0xa1, 0xd5, 0xdd, 0x63, 0x17, 0x0b, 0xe2, 0xf5, 0x4d, 0x55, 0x1d, 0xbf, 0x6e, 0xc0, 0x02, + 0x8b, 0xee, 0x00, 0xf0, 0xeb, 0x02, 0xaa, 0x4b, 0xa7, 0xef, 0xb2, 0x0b, 0x26, 0xc7, 0xd0, 0xf2, + 0x26, 0x48, 0x4e, 0x58, 0xe3, 0x6a, 0x7f, 0xad, 0x0c, 0x03, 0xfc, 0x91, 0xf7, 0x3a, 0x0c, 0x6d, + 0xf2, 0x7c, 0xc6, 0xfd, 0xa5, 0x53, 0x4e, 0x0e, 0x2f, 0x1c, 0x80, 0x25, 0x1b, 0x74, 0x0d, 0x4e, + 0x88, 0xd0, 0xa3, 0x1a, 0x69, 0x39, 0x3b, 0xf2, 0x34, 0xcc, 0xdf, 0x0d, 0x91, 0x09, 0xee, 0x4f, + 0xac, 0x76, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x95, 0xae, 0xf4, 0x83, 0x3c, 0x4f, 0xb4, 0xd2, 0x84, + 0x73, 0x52, 0x10, 0xbe, 0x0c, 0x63, 0xed, 0xae, 0x73, 0xbf, 0xf6, 0x96, 0xb6, 0x79, 0xd6, 0x37, + 0x69, 0x99, 0xef, 0x42, 0x87, 0xf9, 0x6c, 0xac, 0x6d, 0x86, 0x24, 0xda, 0x0c, 0x5a, 0xae, 0x78, + 0x06, 0x36, 0xf1, 0x5d, 0x48, 0xe1, 0x71, 0x57, 0x09, 0xca, 0x65, 0xdd, 0xf1, 0x5a, 0x9d, 0x90, + 0x24, 0x5c, 0x06, 0x4d, 0x2e, 0x2b, 0x29, 0x3c, 0xee, 0x2a, 0x41, 0xe7, 0xd6, 0x49, 0xf1, 0x72, + 0xa8, 0x0c, 0x52, 0x17, 0x22, 0xe8, 0xd3, 0x30, 0x24, 0x03, 0x08, 0x0a, 0xe5, 0x52, 0x11, 0x8e, + 0x09, 0xea, 0x15, 0x52, 0xed, 0x1d, 0x39, 0x11, 0x3a, 0x20, 0xf9, 0x1d, 0xe6, 0x85, 0xca, 0x3f, + 0xb7, 0xe0, 0x44, 0x86, 0x23, 0x1c, 0x17, 0x69, 0x1b, 0x5e, 0x14, 0xab, 0x57, 0x2c, 0x34, 0x91, + 0xc6, 0xe1, 0x58, 0x51, 0xd0, 0xd5, 0xc2, 0x85, 0x66, 0x5a, 0x50, 0x0a, 0x17, 0x13, 0x81, 0xed, + 0x4f, 0x50, 0xa2, 0x33, 0x50, 0xe9, 0x44, 0x24, 0x94, 0x0f, 0x3a, 0x4a, 0x39, 0xcf, 0xec, 0x8c, + 0x0c, 0x43, 0xd5, 0xd6, 0x0d, 0x65, 0xe2, 0xd3, 0xd4, 0x56, 0x6e, 0xe4, 0xe3, 0x38, 0xfb, 0xcb, + 
0x65, 0x98, 0x48, 0x39, 0xc4, 0xd2, 0x86, 0x6c, 0x05, 0xbe, 0x17, 0x07, 0x2a, 0xbf, 0x1d, 0x7f, + 0x43, 0x8e, 0xb4, 0x37, 0xaf, 0x09, 0x38, 0x56, 0x14, 0xe8, 0x49, 0xf9, 0x42, 0x70, 0xfa, 0x75, + 0x8e, 0xc5, 0x9a, 0xf1, 0x48, 0x70, 0xd1, 0x97, 0x75, 0x1e, 0x87, 0x4a, 0x3b, 0x50, 0x0f, 0xbe, + 0xab, 0xf1, 0xc4, 0x8b, 0xb5, 0x7a, 0x10, 0xb4, 0x30, 0x43, 0xa2, 0x27, 0xc4, 0xd7, 0xa7, 0x6e, + 0x46, 0xb0, 0xe3, 0x06, 0x91, 0xd6, 0x05, 0x4f, 0xc1, 0xd0, 0x5d, 0xb2, 0x13, 0x7a, 0xfe, 0x46, + 0xfa, 0x5e, 0xe8, 0x0a, 0x07, 0x63, 0x89, 0x37, 0x93, 0xd5, 0x0f, 0x1d, 0xf1, 0xeb, 0x39, 0xc3, + 0xb9, 0xfb, 0xe0, 0x37, 0x2d, 0x98, 0x60, 0xd9, 0x67, 0x45, 0x8a, 0x04, 0x2f, 0xf0, 0x8f, 0x41, + 0xc7, 0x78, 0x1c, 0x06, 0x42, 0x5a, 0x69, 0xfa, 0xf9, 0x0b, 0xd6, 0x12, 0xcc, 0x71, 0xe8, 0x11, + 0xa8, 0xb0, 0x26, 0xd0, 0x61, 0x1c, 0xe5, 0x49, 0xee, 0x6b, 0x4e, 0xec, 0x60, 0x06, 0x65, 0x31, + 0x68, 0x98, 0xb4, 0x5b, 0x1e, 0x6f, 0x74, 0x62, 0xce, 0xfd, 0xa0, 0xc5, 0xa0, 0x65, 0x36, 0xf2, + 0x41, 0xc5, 0xa0, 0x65, 0x33, 0x3f, 0x58, 0xcf, 0xff, 0xef, 0x25, 0x38, 0x9d, 0x59, 0x2e, 0xb9, + 0x61, 0x5e, 0x31, 0x6e, 0x98, 0x2f, 0xa6, 0x6e, 0x98, 0xed, 0x83, 0x4b, 0x3f, 0x98, 0x3b, 0xe7, + 0xec, 0xab, 0xe0, 0xf2, 0x31, 0x5e, 0x05, 0x57, 0x8a, 0xaa, 0x38, 0x03, 0x39, 0x2a, 0xce, 0x1f, + 0x59, 0xf0, 0x70, 0x66, 0x97, 0x7d, 0xe0, 0x82, 0xfe, 0x32, 0x5b, 0xd9, 0xe3, 0x74, 0xf2, 0x6b, + 0xe5, 0x1e, 0x5f, 0xc5, 0xce, 0x29, 0x67, 0xa9, 0x14, 0x62, 0xc8, 0x48, 0x28, 0x6f, 0xa3, 0x5c, + 0x02, 0x71, 0x18, 0x56, 0x58, 0x14, 0x69, 0x41, 0x73, 0xbc, 0x91, 0xcb, 0x87, 0x5c, 0x50, 0xf3, + 0xa6, 0x1d, 0x5e, 0xcf, 0xfb, 0x90, 0x0e, 0xa5, 0xbb, 0xad, 0x9d, 0x3c, 0xcb, 0x87, 0x39, 0x79, + 0x8e, 0x66, 0x9f, 0x3a, 0xd1, 0x02, 0x4c, 0x6c, 0x79, 0x3e, 0x7b, 0x74, 0xd7, 0xd4, 0x9e, 0x54, + 0xe4, 0xf2, 0x35, 0x13, 0x8d, 0xd3, 0xf4, 0xb3, 0x2f, 0xc3, 0xd8, 0xe1, 0xad, 0x6b, 0x3f, 0x2a, + 0xc3, 0x87, 0x0f, 0x10, 0x0a, 0x7c, 0x77, 0x30, 0xc6, 0x45, 0xdb, 0x1d, 0xba, 0xc6, 0xa6, 0x0e, + 0xd3, 0xeb, 0x9d, 0x56, 0x6b, 0x87, 
0xf9, 0x67, 0x11, 0x57, 0x52, 0x08, 0xa5, 0x46, 0x25, 0xa3, + 0x5e, 0xc9, 0xa0, 0xc1, 0x99, 0x25, 0xd1, 0xcf, 0x03, 0x0a, 0xee, 0xb0, 0xb4, 0xc8, 0x6e, 0x92, + 0xd7, 0x82, 0x0d, 0x41, 0x39, 0x59, 0xaa, 0x37, 0xba, 0x28, 0x70, 0x46, 0x29, 0xaa, 0xa7, 0xd2, + 0x7d, 0x6c, 0x47, 0x35, 0x2b, 0xa5, 0xa7, 0x62, 0x1d, 0x89, 0x4d, 0x5a, 0x74, 0x09, 0xa6, 0x9c, + 0x6d, 0xc7, 0xe3, 0x69, 0xce, 0x24, 0x03, 0xae, 0xa8, 0x2a, 0xfb, 0xd5, 0x42, 0x9a, 0x00, 0x77, + 0x97, 0x41, 0x6d, 0xc3, 0x20, 0xc9, 0x5f, 0x66, 0xf8, 0xe4, 0x21, 0x66, 0x70, 0x61, 0x13, 0xa5, + 0xfd, 0xa7, 0x16, 0xdd, 0xfa, 0x32, 0xde, 0x67, 0xa5, 0x3d, 0xa2, 0x0c, 0x6c, 0x5a, 0x10, 0xa0, + 0xea, 0x91, 0x25, 0x1d, 0x89, 0x4d, 0x5a, 0x3e, 0x35, 0xa2, 0xc4, 0x5d, 0xdc, 0xd0, 0x36, 0x45, + 0xfc, 0xac, 0xa2, 0xa0, 0x1a, 0xb4, 0xeb, 0x6d, 0x7b, 0x51, 0x10, 0x8a, 0x05, 0xd4, 0xef, 0x0b, + 0xe8, 0x4a, 0x5e, 0xd6, 0x38, 0x1b, 0x2c, 0xf9, 0xd9, 0x5f, 0x29, 0xc1, 0x98, 0xac, 0xf1, 0xd5, + 0x4e, 0x10, 0x3b, 0xc7, 0xb0, 0xa5, 0xbf, 0x6a, 0x6c, 0xe9, 0xe7, 0x8b, 0x85, 0x13, 0xb3, 0xc6, + 0xf5, 0xdc, 0xca, 0x3f, 0x9d, 0xda, 0xca, 0x2f, 0xf4, 0xc3, 0xf4, 0xe0, 0x2d, 0xfc, 0xdf, 0x58, + 0x30, 0x65, 0xd0, 0x1f, 0xc3, 0x4e, 0x52, 0x37, 0x77, 0x92, 0x67, 0xfa, 0xf8, 0x9a, 0x1e, 0x3b, + 0xc8, 0xd7, 0x4b, 0xa9, 0xaf, 0x60, 0x3b, 0xc7, 0x2f, 0x40, 0x65, 0xd3, 0x09, 0xdd, 0x62, 0x39, + 0x3f, 0xbb, 0x8a, 0xcf, 0x5f, 0x76, 0x42, 0x97, 0xcb, 0xff, 0x73, 0xea, 0xf5, 0x38, 0x27, 0x74, + 0x73, 0xa3, 0x28, 0x58, 0xa5, 0xe8, 0x25, 0x18, 0x8c, 0x9a, 0x41, 0x5b, 0xf9, 0x99, 0x9e, 0xe1, + 0x2f, 0xcb, 0x51, 0xc8, 0xfe, 0xee, 0x1c, 0x32, 0xab, 0xa3, 0x60, 0x2c, 0xe8, 0x67, 0x37, 0xa0, + 0xaa, 0xaa, 0x3e, 0x52, 0x4f, 0xfb, 0xff, 0x5a, 0x86, 0x13, 0x19, 0x73, 0x05, 0xfd, 0xa2, 0xd1, + 0x6f, 0x2f, 0xf7, 0x3d, 0xd9, 0xde, 0x67, 0xcf, 0xfd, 0x22, 0x3b, 0x29, 0xb9, 0x62, 0x76, 0x1c, + 0xa2, 0xfa, 0x9b, 0x11, 0x49, 0x57, 0x4f, 0x41, 0xf9, 0xd5, 0xd3, 0x6a, 0x8f, 0xad, 0xfb, 0x69, + 0x45, 0xaa, 0xa5, 0x47, 0x3a, 0xce, 0x5f, 0xa8, 0xc0, 0x74, 0x56, 0xde, 
0x02, 0xf4, 0x2b, 0x56, + 0xea, 0x85, 0x91, 0x57, 0xfa, 0x4f, 0x7e, 0xc0, 0x9f, 0x1d, 0x11, 0x59, 0x85, 0xe6, 0xcd, 0x37, + 0x47, 0x72, 0x7b, 0x5c, 0xd4, 0xce, 0xe2, 0x9f, 0x42, 0xfe, 0x5a, 0x8c, 0x94, 0x0a, 0x9f, 0x3a, + 0x44, 0x53, 0xc4, 0x83, 0x33, 0x51, 0x2a, 0xfe, 0x49, 0x82, 0xf3, 0xe3, 0x9f, 0x64, 0x1b, 0x66, + 0x3d, 0x18, 0xd1, 0xbe, 0xeb, 0x48, 0xa7, 0xc1, 0x5d, 0xba, 0x45, 0x69, 0xed, 0x3e, 0xd2, 0xa9, + 0xf0, 0x77, 0x2c, 0x48, 0x39, 0x85, 0x29, 0xb3, 0x8c, 0xd5, 0xd3, 0x2c, 0x73, 0x06, 0x2a, 0x61, + 0xd0, 0x22, 0xe9, 0x47, 0x27, 0x70, 0xd0, 0x22, 0x98, 0x61, 0xd4, 0x83, 0xd2, 0xe5, 0x5e, 0x0f, + 0x4a, 0xd3, 0x73, 0x7a, 0x8b, 0x6c, 0x13, 0x69, 0x24, 0x51, 0x62, 0xfc, 0x2a, 0x05, 0x62, 0x8e, + 0xb3, 0x7f, 0xa7, 0x02, 0x27, 0x32, 0x62, 0x01, 0xe9, 0x09, 0x69, 0xc3, 0x89, 0xc9, 0x3d, 0x67, + 0x27, 0x9d, 0xfc, 0xf6, 0x12, 0x07, 0x63, 0x89, 0x67, 0xce, 0xac, 0x3c, 0x81, 0x5e, 0xca, 0x74, + 0x25, 0xf2, 0xe6, 0x09, 0xec, 0xd1, 0x3f, 0x3d, 0x7c, 0x11, 0x20, 0x8a, 0x5a, 0xcb, 0x3e, 0xd5, + 0xf0, 0x5c, 0xe1, 0x34, 0x9b, 0xe4, 0x5d, 0x6c, 0x5c, 0x15, 0x18, 0xac, 0x51, 0xa1, 0x1a, 0x4c, + 0xb6, 0xc3, 0x20, 0xe6, 0x86, 0xc1, 0x1a, 0x77, 0xb4, 0x18, 0x30, 0xa3, 0xb5, 0xea, 0x29, 0x3c, + 0xee, 0x2a, 0x81, 0x5e, 0x80, 0x11, 0x11, 0xc1, 0x55, 0x0f, 0x82, 0x96, 0x30, 0x23, 0xa9, 0xeb, + 0xf8, 0x46, 0x82, 0xc2, 0x3a, 0x9d, 0x56, 0x8c, 0x59, 0x1b, 0x87, 0x32, 0x8b, 0x71, 0x8b, 0xa3, + 0x46, 0x97, 0xca, 0x6e, 0x32, 0x5c, 0x28, 0xbb, 0x49, 0x62, 0x58, 0xab, 0x16, 0xbe, 0x88, 0x81, + 0x5c, 0x03, 0xd4, 0x1f, 0x96, 0x61, 0x90, 0x0f, 0xc5, 0x31, 0x68, 0x79, 0x75, 0x61, 0x52, 0x2a, + 0x94, 0x49, 0x82, 0xb7, 0x6a, 0xbe, 0xe6, 0xc4, 0x0e, 0x17, 0x4d, 0x6a, 0x85, 0x24, 0x66, 0x28, + 0x34, 0x6f, 0xac, 0xa1, 0xd9, 0x94, 0xa5, 0x04, 0x38, 0x0f, 0x6d, 0x45, 0x6d, 0x02, 0x44, 0xec, + 0xf9, 0x5b, 0xca, 0x43, 0x64, 0xe6, 0x7d, 0xbe, 0x50, 0x3b, 0x1a, 0xaa, 0x18, 0x6f, 0x4d, 0x32, + 0x2d, 0x15, 0x02, 0x6b, 0xbc, 0x67, 0x5f, 0x84, 0xaa, 0x22, 0xce, 0x3b, 0x42, 0x8e, 0xea, 0xa2, + 0xed, 0x67, 
0x61, 0x22, 0x55, 0x57, 0x5f, 0x27, 0xd0, 0xdf, 0xb3, 0x60, 0x82, 0x37, 0x79, 0xd9, + 0xdf, 0x16, 0xa2, 0xe0, 0x73, 0x16, 0x4c, 0xb7, 0x32, 0x56, 0xa2, 0x18, 0xe6, 0xc3, 0xac, 0x61, + 0x75, 0xf8, 0xcc, 0xc2, 0xe2, 0xcc, 0xda, 0xd0, 0x59, 0x18, 0xe6, 0xaf, 0x79, 0x3b, 0x2d, 0xe1, + 0xa1, 0x3d, 0xca, 0x73, 0x92, 0x73, 0x18, 0x56, 0x58, 0xfb, 0xc7, 0x16, 0x4c, 0xf1, 0x8f, 0xb8, + 0x42, 0x76, 0xd4, 0xf1, 0xea, 0x03, 0xf2, 0x19, 0x22, 0xfb, 0x7a, 0xa9, 0x47, 0xf6, 0x75, 0xfd, + 0x2b, 0xcb, 0x07, 0x7e, 0xe5, 0x37, 0x2c, 0x10, 0x33, 0xf4, 0x18, 0xce, 0x0f, 0xab, 0xe6, 0xf9, + 0xe1, 0x23, 0x45, 0x26, 0x7d, 0x8f, 0x83, 0xc3, 0xaf, 0x96, 0x60, 0x92, 0x13, 0x24, 0x37, 0x32, + 0x1f, 0x94, 0xc1, 0xe9, 0xef, 0x55, 0x20, 0xf5, 0x26, 0x6c, 0xf6, 0x97, 0x1a, 0x63, 0x59, 0x39, + 0x70, 0x2c, 0xff, 0xa7, 0x05, 0x88, 0xf7, 0x49, 0xfa, 0x29, 0x74, 0xbe, 0xbb, 0x69, 0xe6, 0x80, + 0x44, 0x72, 0x28, 0x0c, 0xd6, 0xa8, 0x1e, 0xf0, 0x27, 0xa4, 0xee, 0xc3, 0xca, 0xf9, 0xf7, 0x61, + 0x7d, 0x7c, 0xf5, 0x77, 0xcb, 0x90, 0x76, 0xd5, 0x44, 0x6f, 0xc3, 0x68, 0xd3, 0x69, 0x3b, 0x77, + 0xbc, 0x96, 0x17, 0x7b, 0x24, 0x2a, 0x76, 0xe1, 0xbe, 0xa4, 0x95, 0x10, 0xd7, 0x50, 0x1a, 0x04, + 0x1b, 0x1c, 0xd1, 0x3c, 0x40, 0x3b, 0xf4, 0xb6, 0xbd, 0x16, 0xd9, 0x60, 0x27, 0x1e, 0x16, 0xeb, + 0xc1, 0xef, 0x8e, 0x25, 0x14, 0x6b, 0x14, 0x19, 0xb1, 0x01, 0xe5, 0xe3, 0x88, 0x0d, 0xa8, 0xf4, + 0x19, 0x1b, 0x30, 0x50, 0x28, 0x36, 0x00, 0xc3, 0x29, 0xb9, 0x79, 0xd3, 0xff, 0x2b, 0x5e, 0x8b, + 0x08, 0xdd, 0x8d, 0xc7, 0x82, 0xcc, 0xee, 0xed, 0xce, 0x9d, 0xc2, 0x99, 0x14, 0xb8, 0x47, 0x49, + 0xbb, 0x03, 0x27, 0x1a, 0x24, 0x94, 0xcf, 0xd8, 0xa9, 0xb5, 0xf4, 0x26, 0x54, 0xc3, 0xd4, 0x32, + 0xee, 0x33, 0xe0, 0x5f, 0xcb, 0xf1, 0x26, 0x97, 0x6d, 0xc2, 0xd2, 0xfe, 0xeb, 0x25, 0x18, 0x12, + 0x4e, 0x9a, 0xc7, 0xa0, 0x7c, 0x5c, 0x31, 0x4c, 0x4c, 0x4f, 0xe5, 0xc9, 0x3f, 0xd6, 0xac, 0x9e, + 0xc6, 0xa5, 0x46, 0xca, 0xb8, 0xf4, 0x4c, 0x31, 0x76, 0x07, 0x9b, 0x95, 0x7e, 0xab, 0x0c, 0xe3, + 0xa6, 0xd3, 0xea, 0x31, 0x74, 0xcb, 0x6b, 0x30, 
0x14, 0x09, 0xff, 0xe9, 0x52, 0x11, 0x9f, 0xbd, + 0xf4, 0x10, 0x27, 0x37, 0xf1, 0xc2, 0x63, 0x5a, 0xb2, 0xcb, 0x74, 0xd1, 0x2e, 0x1f, 0x8b, 0x8b, + 0x76, 0x9e, 0x2f, 0x71, 0xe5, 0x41, 0xf8, 0x12, 0xdb, 0xdf, 0x63, 0x22, 0x5f, 0x87, 0x1f, 0xc3, + 0x36, 0xfe, 0xaa, 0xb9, 0x39, 0x9c, 0x2b, 0x34, 0xef, 0x44, 0xf3, 0x7a, 0x6c, 0xe7, 0xdf, 0xb2, + 0x60, 0x44, 0x10, 0x1e, 0xc3, 0x07, 0xfc, 0xbc, 0xf9, 0x01, 0x4f, 0x14, 0xfa, 0x80, 0x1e, 0x2d, + 0xff, 0x4a, 0x49, 0xb5, 0xbc, 0x1e, 0x84, 0x71, 0xa1, 0x4c, 0xe8, 0xc3, 0xf4, 0xe8, 0x17, 0x34, + 0x83, 0x96, 0x50, 0xe0, 0x1e, 0x49, 0x42, 0xff, 0x38, 0x7c, 0x5f, 0xfb, 0x8d, 0x15, 0x35, 0x8b, + 0x4c, 0x0b, 0xc2, 0x58, 0x6c, 0xa0, 0x49, 0x64, 0x5a, 0x10, 0xc6, 0x98, 0x61, 0x90, 0x0b, 0x10, + 0x3b, 0xe1, 0x06, 0x89, 0x29, 0x4c, 0x44, 0xcd, 0xf6, 0x5e, 0xad, 0x9d, 0xd8, 0x6b, 0xcd, 0x7b, + 0x7e, 0x1c, 0xc5, 0xe1, 0xfc, 0xaa, 0x1f, 0xdf, 0x08, 0xb9, 0xd2, 0xaf, 0xc5, 0xf2, 0x29, 0x5e, + 0x58, 0xe3, 0x2b, 0x83, 0x44, 0x58, 0x1d, 0x03, 0xe6, 0x0d, 0xd2, 0x75, 0x01, 0xc7, 0x8a, 0xc2, + 0x7e, 0x91, 0x49, 0x76, 0xd6, 0x41, 0xfd, 0x85, 0xd9, 0x7d, 0x61, 0x48, 0x75, 0x2d, 0x33, 0x0b, + 0x5f, 0xd7, 0x83, 0xf9, 0x8a, 0x8a, 0x4f, 0xda, 0x04, 0xdd, 0x8f, 0x3a, 0x89, 0xfd, 0x43, 0xa4, + 0xeb, 0xda, 0xf1, 0xc5, 0xc2, 0x12, 0xb9, 0x8f, 0x8b, 0x46, 0x96, 0x92, 0x91, 0xe5, 0xa1, 0x5b, + 0xad, 0xa7, 0xf3, 0xd7, 0x2f, 0x49, 0x04, 0x4e, 0x68, 0xd0, 0x79, 0x71, 0xa0, 0xe4, 0x16, 0x97, + 0x0f, 0xa7, 0x0e, 0x94, 0xb2, 0x4b, 0xb4, 0x13, 0xe5, 0x05, 0x18, 0x51, 0x4f, 0x02, 0xd5, 0xf9, + 0x63, 0x2c, 0x55, 0xae, 0x5f, 0x2d, 0x27, 0x60, 0xac, 0xd3, 0xa0, 0x35, 0x98, 0x88, 0xf8, 0x7b, + 0x45, 0x32, 0x5a, 0x43, 0x18, 0x0e, 0x9e, 0x96, 0x97, 0x94, 0x0d, 0x13, 0xbd, 0xcf, 0x40, 0x7c, + 0x29, 0xcb, 0xf8, 0x8e, 0x34, 0x0b, 0xf4, 0x0a, 0x8c, 0xb7, 0xf4, 0x37, 0x5c, 0xeb, 0xc2, 0xae, + 0xa0, 0xdc, 0xce, 0x8c, 0x17, 0x5e, 0xeb, 0x38, 0x45, 0x8d, 0x5e, 0x83, 0x19, 0x1d, 0x22, 0x92, + 0x0b, 0x39, 0xfe, 0x06, 0x89, 0xc4, 0xdb, 0x26, 0x8f, 0xec, 0xed, 0xce, 0xcd, 0x5c, 
0xed, 0x41, + 0x83, 0x7b, 0x96, 0x46, 0x2f, 0xc1, 0xa8, 0xfc, 0x7c, 0x2d, 0xb6, 0x29, 0x71, 0x78, 0xd4, 0x70, + 0xd8, 0xa0, 0x44, 0xf7, 0xe0, 0xa4, 0xfc, 0xbf, 0x16, 0x3a, 0xeb, 0xeb, 0x5e, 0x53, 0x04, 0x99, + 0x8d, 0x30, 0x16, 0x0b, 0xd2, 0x5f, 0x7c, 0x39, 0x8b, 0x68, 0x7f, 0x77, 0xee, 0x8c, 0xe8, 0xb5, + 0x4c, 0x3c, 0x1b, 0xc4, 0x6c, 0xfe, 0xe8, 0x1a, 0x9c, 0xd8, 0x24, 0x4e, 0x2b, 0xde, 0x5c, 0xda, + 0x24, 0xcd, 0xbb, 0x72, 0x61, 0xb1, 0x88, 0x29, 0xcd, 0x25, 0xf0, 0x72, 0x37, 0x09, 0xce, 0x2a, + 0xf7, 0xfe, 0xee, 0x94, 0x7f, 0x81, 0x16, 0xd6, 0xf4, 0x07, 0xf4, 0x0e, 0x8c, 0xea, 0x7d, 0x9d, + 0x56, 0x0c, 0xf2, 0xdf, 0xf7, 0x15, 0x7a, 0x88, 0x1a, 0x01, 0x1d, 0x87, 0x0d, 0xde, 0xf6, 0xbf, + 0x2b, 0xc1, 0x5c, 0x4e, 0xee, 0xae, 0x94, 0x35, 0xcb, 0x2a, 0x64, 0xcd, 0x5a, 0x90, 0x6f, 0xde, + 0x5c, 0x4f, 0xe5, 0x4c, 0x4f, 0xbd, 0x62, 0x93, 0x64, 0x4e, 0x4f, 0xd3, 0x17, 0xf6, 0x34, 0xd3, + 0x0d, 0x62, 0x95, 0x5c, 0x87, 0xbb, 0xd7, 0x75, 0x1b, 0xe7, 0xc0, 0x61, 0x94, 0xde, 0x9e, 0xe6, + 0x4d, 0xfb, 0x7b, 0x25, 0x38, 0xa9, 0x3a, 0xf3, 0xa7, 0xb7, 0x0b, 0xdf, 0xea, 0xee, 0xc2, 0x07, + 0x6a, 0x26, 0xb6, 0x6f, 0xc0, 0x60, 0x63, 0x27, 0x6a, 0xc6, 0xad, 0x02, 0x3b, 0xfe, 0xe3, 0xc6, + 0xba, 0x4a, 0x76, 0x23, 0xf6, 0x92, 0x9d, 0x58, 0x66, 0xf6, 0xe7, 0x2d, 0x98, 0x58, 0x5b, 0xaa, + 0x37, 0x82, 0xe6, 0x5d, 0x12, 0x2f, 0x70, 0x83, 0x06, 0x16, 0x1b, 0xbe, 0x75, 0xc8, 0x8d, 0x3c, + 0x4b, 0x45, 0x38, 0x03, 0x95, 0xcd, 0x20, 0x8a, 0xd3, 0x97, 0x02, 0x97, 0x83, 0x28, 0xc6, 0x0c, + 0x63, 0xff, 0x99, 0x05, 0x03, 0xec, 0xa1, 0xb6, 0xbc, 0x47, 0xfe, 0x8a, 0x7c, 0x17, 0x7a, 0x01, + 0x06, 0xc9, 0xfa, 0x3a, 0x69, 0xc6, 0x62, 0x7c, 0x65, 0x80, 0xcd, 0xe0, 0x32, 0x83, 0xd2, 0x1d, + 0x8d, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x3e, 0x03, 0xd5, 0xd8, 0xdb, 0x22, 0x0b, 0xae, 0x2b, + 0xac, 0xf0, 0xfd, 0xf9, 0x7c, 0xa9, 0x1d, 0x76, 0x4d, 0x32, 0xc1, 0x09, 0x3f, 0xfb, 0x4b, 0x25, + 0x80, 0x24, 0x7c, 0x2e, 0xef, 0x33, 0x17, 0xbb, 0xde, 0x32, 0x7c, 0x32, 0xe3, 0x2d, 0x43, 0x94, + 0x30, 0xcc, 0x78, 0xc9, 
0x50, 0x75, 0x55, 0xb9, 0x50, 0x57, 0x55, 0xfa, 0xe9, 0xaa, 0x25, 0x98, + 0x4a, 0xc2, 0xff, 0xcc, 0x38, 0x6a, 0x96, 0x6f, 0x78, 0x2d, 0x8d, 0xc4, 0xdd, 0xf4, 0xf6, 0x97, + 0x2c, 0x10, 0x5e, 0xc2, 0x05, 0x26, 0xb4, 0x2b, 0xdf, 0x1d, 0x33, 0x52, 0x0b, 0x3e, 0x5d, 0xc4, + 0x81, 0x5a, 0x24, 0x14, 0x54, 0x72, 0xdf, 0x48, 0x23, 0x68, 0x70, 0xb5, 0x7f, 0xdb, 0x82, 0x11, + 0x8e, 0xbe, 0xc6, 0x0e, 0xa2, 0xf9, 0xed, 0xea, 0x2b, 0x99, 0x35, 0x7b, 0x92, 0x8b, 0x32, 0x56, + 0x49, 0x8d, 0xf5, 0x27, 0xb9, 0x24, 0x02, 0x27, 0x34, 0xe8, 0x29, 0x18, 0x8a, 0x3a, 0x77, 0x18, + 0x79, 0xca, 0x65, 0xb8, 0xc1, 0xc1, 0x58, 0xe2, 0xed, 0x7f, 0x5a, 0x82, 0xc9, 0xb4, 0xc7, 0x38, + 0xc2, 0x30, 0xc8, 0x05, 0x48, 0xfa, 0x4c, 0x73, 0x90, 0x01, 0x54, 0xf3, 0x38, 0x07, 0xfe, 0xb0, + 0x3c, 0x13, 0x41, 0x82, 0x13, 0x5a, 0x87, 0x11, 0x37, 0xb8, 0xe7, 0xdf, 0x73, 0x42, 0x77, 0xa1, + 0xbe, 0x2a, 0x46, 0x22, 0xc7, 0xc7, 0xaf, 0x96, 0x14, 0xd0, 0xfd, 0xd9, 0x99, 0x41, 0x2e, 0x41, + 0x61, 0x9d, 0x31, 0x7a, 0x93, 0x65, 0x42, 0x59, 0xf7, 0x36, 0xae, 0x39, 0xed, 0x62, 0xde, 0x2c, + 0x4b, 0x92, 0x5c, 0xab, 0x63, 0x4c, 0x24, 0x4e, 0xe1, 0x08, 0x9c, 0xb0, 0xb4, 0x7f, 0xf5, 0x24, + 0x18, 0x73, 0xc1, 0xc8, 0x38, 0x6d, 0x3d, 0xf0, 0x8c, 0xd3, 0x6f, 0xc0, 0x30, 0xd9, 0x6a, 0xc7, + 0x3b, 0x35, 0x2f, 0x2c, 0xf6, 0x7e, 0xc0, 0xb2, 0xa0, 0xee, 0xe6, 0x2e, 0x31, 0x58, 0x71, 0xec, + 0x91, 0x3f, 0xbc, 0xfc, 0x81, 0xc8, 0x1f, 0x5e, 0xf9, 0x4b, 0xc9, 0x1f, 0xfe, 0x1a, 0x0c, 0x6d, + 0x78, 0x31, 0x26, 0xed, 0x40, 0xec, 0xc6, 0x39, 0x93, 0xe7, 0x12, 0x27, 0xee, 0xce, 0x2c, 0x2b, + 0x10, 0x58, 0xb2, 0x43, 0x6b, 0x6a, 0x51, 0x0d, 0x16, 0xd1, 0x41, 0xbb, 0x0d, 0xe4, 0x99, 0xcb, + 0x4a, 0xe4, 0x0b, 0x1f, 0x7a, 0xff, 0xf9, 0xc2, 0x55, 0x96, 0xef, 0xe1, 0x07, 0x95, 0xe5, 0xdb, + 0xc8, 0x96, 0x5e, 0x3d, 0x8a, 0x6c, 0xe9, 0x5f, 0xb2, 0xe0, 0x64, 0x3b, 0xeb, 0xad, 0x01, 0x91, + 0xaf, 0xfb, 0xe7, 0x0e, 0xf1, 0xfa, 0x82, 0x51, 0x35, 0xcb, 0xef, 0x91, 0x49, 0x86, 0xb3, 0x2b, + 0x96, 0x69, 0xd7, 0x47, 0xde, 0x7f, 0xda, 0xf5, 0xa3, 0x4e, 
0xec, 0x9d, 0x24, 0x61, 0x1f, 0x3b, + 0x92, 0x24, 0xec, 0xe3, 0x0f, 0x30, 0x09, 0xbb, 0x96, 0x3e, 0x7d, 0xe2, 0xc1, 0xa6, 0x4f, 0xdf, + 0x34, 0xf7, 0x25, 0x9e, 0xad, 0xfb, 0x85, 0xc2, 0xfb, 0x92, 0x51, 0xc3, 0xc1, 0x3b, 0x13, 0x4f, + 0x24, 0x3f, 0xf5, 0x3e, 0x13, 0xc9, 0x1b, 0xe9, 0xd8, 0xd1, 0x51, 0xa4, 0x63, 0x7f, 0x5b, 0xdf, + 0x41, 0x4f, 0x14, 0xa9, 0x41, 0x6d, 0x94, 0xdd, 0x35, 0x64, 0xed, 0xa1, 0xdd, 0x09, 0xdf, 0xa7, + 0x8f, 0x3b, 0xe1, 0xfb, 0xc9, 0x23, 0x4c, 0xf8, 0x7e, 0xea, 0x58, 0x13, 0xbe, 0x3f, 0xf4, 0x01, + 0x49, 0xf8, 0x3e, 0x73, 0x5c, 0x09, 0xdf, 0x1f, 0x7e, 0xb0, 0x09, 0xdf, 0xdf, 0x86, 0x6a, 0x5b, + 0xc6, 0x5d, 0xce, 0xcc, 0x16, 0x19, 0xba, 0xcc, 0x30, 0x4d, 0x3e, 0x74, 0x0a, 0x85, 0x13, 0xa6, + 0xb4, 0x86, 0x24, 0x01, 0xfc, 0x87, 0x8b, 0xd4, 0x90, 0x69, 0xf7, 0x38, 0x20, 0xed, 0xfb, 0x17, + 0x4a, 0x70, 0xfa, 0xe0, 0xd5, 0x91, 0x18, 0x4d, 0xea, 0x89, 0x2d, 0x3b, 0x65, 0x34, 0x61, 0x9a, + 0xa7, 0x46, 0x55, 0x38, 0x9c, 0xfd, 0x12, 0x4c, 0x29, 0x3f, 0xaf, 0x96, 0xd7, 0xdc, 0xd1, 0x9e, + 0xa1, 0x52, 0xf1, 0x09, 0x8d, 0x34, 0x01, 0xee, 0x2e, 0x83, 0x16, 0x60, 0xc2, 0x00, 0xae, 0xd6, + 0xc4, 0xf9, 0x45, 0x59, 0x69, 0x1a, 0x26, 0x1a, 0xa7, 0xe9, 0xed, 0xaf, 0x5b, 0xf0, 0x50, 0x8f, + 0x0c, 0xaf, 0x85, 0x63, 0xb4, 0xdb, 0x30, 0xd1, 0x36, 0x8b, 0x16, 0x4e, 0xf9, 0x60, 0x64, 0x94, + 0x55, 0xad, 0x4e, 0x21, 0x70, 0x9a, 0xfd, 0xe2, 0xd9, 0xef, 0xff, 0xe8, 0xf4, 0x87, 0x7e, 0xf0, + 0xa3, 0xd3, 0x1f, 0xfa, 0xe1, 0x8f, 0x4e, 0x7f, 0xe8, 0x97, 0xf6, 0x4e, 0x5b, 0xdf, 0xdf, 0x3b, + 0x6d, 0xfd, 0x60, 0xef, 0xb4, 0xf5, 0xc3, 0xbd, 0xd3, 0xd6, 0x9f, 0xef, 0x9d, 0xb6, 0xbe, 0xf4, + 0xe3, 0xd3, 0x1f, 0x7a, 0xbd, 0xb4, 0x7d, 0xe1, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x1c, + 0xd0, 0x1f, 0x95, 0xd0, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/types.go index 6d7814a34929..f9bf023b426e 100644 --- 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/types.go @@ -700,7 +700,7 @@ type EmptyDirVolumeSource struct { // The default is nil which means that the limit is undefined. // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir // +optional - SizeLimit resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` } // Represents a Glusterfs mount that lasts the lifetime of a pod. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go index 1832c432bc0f..e0258772ffdc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1 import ( + resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" @@ -1240,7 +1241,7 @@ func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.D func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { out.Medium = api.StorageMedium(in.Medium) - out.SizeLimit = in.SizeLimit + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } @@ -1251,7 +1252,7 @@ func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVol func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { out.Medium = StorageMedium(in.Medium) - out.SizeLimit = in.SizeLimit + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) return nil } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go index b909d9b33252..18cd4311e879 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1 import ( + resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" @@ -858,7 +859,11 @@ func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conver in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) *out = *in - out.SizeLimit = in.SizeLimit.DeepCopy() + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } return nil } } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go index cc111561b7f2..0aa95e1071b0 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. 
package api import ( + resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" fields "k8s.io/apimachinery/pkg/fields" @@ -860,7 +861,11 @@ func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conve in := in.(*EmptyDirVolumeSource) out := out.(*EmptyDirVolumeSource) *out = *in - out.SizeLimit = in.SizeLimit.DeepCopy() + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } return nil } } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go index 47fb3d5f19ec..26588f9c87cc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go @@ -3314,89 +3314,88 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1331 bytes of a gzipped FileDescriptorProto + // 1323 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, - 0x1b, 0xce, 0x3a, 0x4e, 0x9a, 0x6f, 0x9c, 0x26, 0xfd, 0xa6, 0x55, 0xeb, 0xa6, 0xd4, 0x8e, 0x56, - 0x08, 0xb5, 0x08, 0x76, 0xa9, 0x29, 0x88, 0x0a, 0x01, 0x8a, 0xcd, 0xa1, 0x15, 0x71, 0x0f, 0xd3, - 0x50, 0x21, 0x40, 0x82, 0xc9, 0x7a, 0xea, 0x0c, 0xf1, 0x1e, 0xb4, 0x33, 0xb6, 0x48, 0xa5, 0x4a, - 0xdc, 0x70, 0x87, 0x04, 0x37, 0xfc, 0x04, 0x24, 0xfe, 0x01, 0xd7, 0x20, 0x21, 0xf5, 0xb2, 0x97, - 0xe5, 0xc6, 0xa2, 0xee, 0x1d, 0x3f, 0x21, 0x12, 0x07, 0xcd, 0x61, 0x4f, 0x5e, 0x6f, 0x1a, 0x87, - 0xb4, 0x82, 0x3b, 0x7b, 0xe6, 0x7d, 0x9f, 0xe7, 0x3d, 0x3c, 0xf3, 0xce, 0x2c, 0x78, 0x6b, 0xfb, - 0x35, 0x66, 0x51, 0xdf, 0xde, 0xee, 0x6f, 0x92, 0xd0, 0x23, 0x9c, 0x30, 0x3b, 0xd8, 0xee, 0xda, - 
0x38, 0xa0, 0xcc, 0xc6, 0x7d, 0xee, 0x33, 0x07, 0xf7, 0xa8, 0xd7, 0xb5, 0x07, 0x0d, 0xdc, 0x0b, - 0xb6, 0xf0, 0x05, 0xbb, 0x4b, 0x3c, 0x12, 0x62, 0x4e, 0x3a, 0x56, 0x10, 0xfa, 0xdc, 0x87, 0xb6, - 0x02, 0xb0, 0x12, 0x00, 0x2b, 0xd8, 0xee, 0x5a, 0x02, 0xc0, 0x4a, 0x01, 0x58, 0x11, 0xc0, 0xca, - 0x8b, 0x5d, 0xca, 0xb7, 0xfa, 0x9b, 0x96, 0xe3, 0xbb, 0x76, 0xd7, 0xef, 0xfa, 0xb6, 0xc4, 0xd9, - 0xec, 0xdf, 0x96, 0xff, 0xe4, 0x1f, 0xf9, 0x4b, 0xe1, 0xaf, 0x5c, 0xd4, 0x01, 0xe2, 0x80, 0xba, - 0xd8, 0xd9, 0xa2, 0x1e, 0x09, 0x77, 0xa2, 0x10, 0xed, 0x90, 0x30, 0xbf, 0x1f, 0x3a, 0x64, 0x3c, - 0xaa, 0x3d, 0xbd, 0x98, 0xed, 0x12, 0x8e, 0xed, 0x41, 0x2e, 0x97, 0x15, 0xbb, 0xc8, 0x2b, 0xec, - 0x7b, 0x9c, 0xba, 0x79, 0x9a, 0x57, 0x1f, 0xe7, 0xc0, 0x9c, 0x2d, 0xe2, 0xe2, 0x9c, 0xdf, 0xcb, - 0x45, 0x7e, 0x7d, 0x4e, 0x7b, 0x36, 0xf5, 0x38, 0xe3, 0x61, 0xce, 0xe9, 0x85, 0xc2, 0x56, 0x4d, - 0xca, 0xe5, 0xd2, 0x7e, 0x1b, 0x9b, 0x73, 0x35, 0xbf, 0x33, 0xc0, 0x99, 0x56, 0xe8, 0x33, 0x76, - 0x8b, 0x84, 0x8c, 0xfa, 0xde, 0xb5, 0xcd, 0xcf, 0x89, 0xc3, 0x11, 0xb9, 0x4d, 0x42, 0xe2, 0x39, - 0x04, 0xae, 0x82, 0xf2, 0x36, 0xf5, 0x3a, 0x55, 0x63, 0xd5, 0x38, 0xf7, 0xbf, 0xe6, 0xe2, 0xbd, - 0x61, 0x7d, 0x66, 0x34, 0xac, 0x97, 0xdf, 0xa7, 0x5e, 0x07, 0xc9, 0x1d, 0x61, 0xe1, 0x61, 0x97, - 0x54, 0x4b, 0x59, 0x8b, 0xab, 0xd8, 0x25, 0x48, 0xee, 0xc0, 0x06, 0x00, 0x38, 0xa0, 0x9a, 0xa0, - 0x3a, 0x2b, 0xed, 0xa0, 0xb6, 0x03, 0x6b, 0xd7, 0xaf, 0xe8, 0x1d, 0x94, 0xb2, 0x32, 0x1f, 0x95, - 0xc0, 0xa9, 0xcb, 0x7e, 0x48, 0xef, 0xf8, 0x1e, 0xc7, 0xbd, 0xeb, 0x7e, 0x67, 0x4d, 0xe7, 0x41, - 0x42, 0xf8, 0x19, 0x58, 0x10, 0x5d, 0xed, 0x60, 0x8e, 0x65, 0x5c, 0x95, 0xc6, 0x4b, 0x96, 0x56, - 0x66, 0xba, 0xc8, 0x89, 0x36, 0x85, 0xb5, 0x35, 0xb8, 0x60, 0xa9, 0xe4, 0xda, 0x84, 0xe3, 0x84, - 0x3f, 0x59, 0x43, 0x31, 0x2a, 0xf4, 0x40, 0x99, 0x05, 0xc4, 0x91, 0x39, 0x55, 0x1a, 0xeb, 0xd6, - 0x94, 0xba, 0xb7, 0x0a, 0x22, 0xbf, 0x19, 0x10, 0x27, 0xa9, 0x90, 0xf8, 0x87, 0x24, 0x0f, 0x1c, - 0x80, 0x79, 0xc6, 0x31, 0xef, 0x33, 
0x59, 0x9d, 0x4a, 0xe3, 0xea, 0xa1, 0x31, 0x4a, 0xd4, 0xe6, - 0x92, 0xe6, 0x9c, 0x57, 0xff, 0x91, 0x66, 0x33, 0xbf, 0x99, 0x05, 0xab, 0x05, 0x9e, 0x2d, 0xdf, - 0xeb, 0x50, 0x4e, 0x7d, 0x0f, 0x5e, 0x06, 0x65, 0xbe, 0x13, 0x10, 0x2d, 0x81, 0x8b, 0x51, 0xf8, - 0x1b, 0x3b, 0x01, 0xd9, 0x1d, 0xd6, 0x9f, 0x7d, 0x9c, 0xbf, 0xb0, 0x43, 0x12, 0x01, 0xde, 0x8a, - 0xd3, 0x54, 0x62, 0x79, 0x33, 0x1b, 0xd6, 0xee, 0xb0, 0xbe, 0xa7, 0xee, 0xad, 0x18, 0x33, 0x9b, - 0x06, 0x1c, 0x00, 0xd8, 0xc3, 0x8c, 0x6f, 0x84, 0xd8, 0x63, 0x8a, 0x93, 0xba, 0x44, 0x97, 0xf2, - 0xf9, 0xfd, 0x49, 0x43, 0x78, 0x34, 0x57, 0x74, 0x3c, 0x70, 0x3d, 0x87, 0x86, 0x26, 0x30, 0xc0, - 0xe7, 0xc0, 0x7c, 0x48, 0x30, 0xf3, 0xbd, 0x6a, 0x59, 0xe6, 0x13, 0x97, 0x19, 0xc9, 0x55, 0xa4, - 0x77, 0xe1, 0x79, 0x70, 0xc4, 0x25, 0x8c, 0xe1, 0x2e, 0xa9, 0xce, 0x49, 0xc3, 0x65, 0x6d, 0x78, - 0xa4, 0xad, 0x96, 0x51, 0xb4, 0x6f, 0xfe, 0x6e, 0x80, 0x33, 0x05, 0x15, 0x5d, 0xa7, 0x8c, 0xc3, - 0x4f, 0x72, 0xda, 0xb7, 0xf6, 0x97, 0xa0, 0xf0, 0x96, 0xca, 0x3f, 0xa6, 0xb9, 0x17, 0xa2, 0x95, - 0x94, 0xee, 0x5d, 0x30, 0x47, 0x39, 0x71, 0x45, 0x7f, 0x66, 0xcf, 0x55, 0x1a, 0x97, 0x0f, 0x4b, - 0x86, 0xcd, 0xa3, 0x9a, 0x74, 0xee, 0x8a, 0x80, 0x47, 0x8a, 0xc5, 0xfc, 0xb3, 0x54, 0x98, 0xac, - 0x38, 0x1c, 0xf0, 0x6b, 0x03, 0x2c, 0xc9, 0xbf, 0x1b, 0x38, 0xec, 0x12, 0x31, 0x95, 0x74, 0xce, - 0xd3, 0x9f, 0xc8, 0x3d, 0x66, 0x5c, 0xf3, 0xa4, 0x0e, 0x6e, 0xe9, 0x66, 0x86, 0x0b, 0x8d, 0x71, - 0xc3, 0x0b, 0xa0, 0xe2, 0x52, 0x0f, 0x91, 0xa0, 0x47, 0x1d, 0xac, 0x34, 0x3c, 0xd7, 0x5c, 0x1e, - 0x0d, 0xeb, 0x95, 0x76, 0xb2, 0x8c, 0xd2, 0x36, 0xf0, 0x15, 0x50, 0x71, 0xf1, 0x17, 0xb1, 0xcb, - 0xac, 0x74, 0x39, 0xae, 0xf9, 0x2a, 0xed, 0x64, 0x0b, 0xa5, 0xed, 0xe0, 0x6d, 0x21, 0x18, 0x1e, - 0x52, 0x87, 0x55, 0xcb, 0xb2, 0x13, 0xaf, 0x4f, 0x9d, 0x70, 0x5b, 0xfa, 0xcb, 0x89, 0x93, 0x52, - 0x9b, 0xc4, 0x44, 0x11, 0xb8, 0xf9, 0x6b, 0x19, 0x9c, 0xdd, 0x73, 0x72, 0xc0, 0x77, 0x01, 0xf4, - 0x37, 0x19, 0x09, 0x07, 0xa4, 0xf3, 0x9e, 0xba, 0x3a, 0xc4, 0x0c, 0x17, 
0x5d, 0x98, 0x6d, 0x9e, - 0x14, 0x47, 0xe5, 0x5a, 0x6e, 0x17, 0x4d, 0xf0, 0x80, 0x0e, 0x38, 0x2a, 0x0e, 0x90, 0xaa, 0x30, - 0xd5, 0xd7, 0xc5, 0x74, 0xa7, 0xf3, 0xff, 0xa3, 0x61, 0xfd, 0xe8, 0x7a, 0x1a, 0x04, 0x65, 0x31, - 0xe1, 0x1a, 0x58, 0x76, 0xfa, 0x61, 0x48, 0x3c, 0x3e, 0x56, 0xf1, 0x53, 0xba, 0x02, 0xcb, 0xad, - 0xec, 0x36, 0x1a, 0xb7, 0x17, 0x10, 0x1d, 0xc2, 0x68, 0x48, 0x3a, 0x31, 0x44, 0x39, 0x0b, 0xf1, - 0x76, 0x76, 0x1b, 0x8d, 0xdb, 0xc3, 0xbb, 0x60, 0x49, 0xa3, 0xea, 0x7a, 0x57, 0xe7, 0x64, 0x0f, - 0xdf, 0x38, 0x68, 0x0f, 0xd5, 0x0c, 0x8f, 0x55, 0xda, 0xca, 0x80, 0xa3, 0x31, 0x32, 0xf8, 0x95, - 0x01, 0x80, 0x13, 0x0d, 0x4a, 0x56, 0x9d, 0x97, 0xdc, 0x37, 0x0e, 0xeb, 0x24, 0xc7, 0x23, 0x38, - 0xb9, 0x41, 0xe3, 0x25, 0x86, 0x52, 0xc4, 0xe6, 0x1f, 0x25, 0x00, 0x12, 0x11, 0xc2, 0x8b, 0x99, - 0x5b, 0x64, 0x75, 0xec, 0x16, 0x39, 0xa6, 0x2d, 0xe5, 0x0b, 0x2f, 0x75, 0x63, 0x74, 0xc1, 0xbc, - 0x2f, 0x4f, 0xab, 0xd6, 0x4b, 0x6b, 0xea, 0x3c, 0xe2, 0xfb, 0x3d, 0x86, 0x6f, 0x02, 0x31, 0xa2, - 0xf5, 0x10, 0xd0, 0xf0, 0xf0, 0x53, 0x50, 0x0e, 0xfc, 0x4e, 0x74, 0xff, 0xae, 0x4d, 0x4d, 0x73, - 0xdd, 0xef, 0xb0, 0x0c, 0xc9, 0x82, 0xc8, 0x4e, 0xac, 0x22, 0x09, 0x0c, 0x7d, 0xb0, 0x10, 0xbd, - 0x60, 0xa5, 0xa2, 0x2a, 0x8d, 0x77, 0xa6, 0x26, 0x41, 0x1a, 0x20, 0x43, 0xb4, 0x28, 0x66, 0x79, - 0xb4, 0x83, 0x62, 0x12, 0xf3, 0xaf, 0x12, 0x58, 0x4c, 0x0b, 0xe8, 0xdf, 0xd1, 0x01, 0xa5, 0xe5, - 0x27, 0xdc, 0x01, 0x45, 0xf2, 0x14, 0x3a, 0xa0, 0x88, 0x8a, 0x3a, 0xf0, 0x7d, 0x09, 0xc0, 0xbc, - 0xfc, 0x20, 0x07, 0xf3, 0x5c, 0xde, 0x29, 0x4f, 0xe4, 0x32, 0x8b, 0xdf, 0x20, 0xfa, 0xde, 0xd2, - 0x5c, 0xe2, 0x11, 0xae, 0xa6, 0xfe, 0xd5, 0xe4, 0xb1, 0x1e, 0x1f, 0xe1, 0x76, 0xbc, 0x83, 0x52, - 0x56, 0x90, 0x80, 0x8a, 0xf2, 0xbe, 0x85, 0x7b, 0xfd, 0xe8, 0x41, 0xb5, 0xe7, 0x7b, 0xc3, 0x8a, - 0x92, 0xb7, 0x6e, 0xf4, 0xb1, 0xc7, 0x29, 0xdf, 0x49, 0x6e, 0xbb, 0x8d, 0x04, 0x0a, 0xa5, 0x71, - 0xcd, 0x1f, 0xc6, 0xeb, 0xa4, 0xf4, 0xfa, 0xdf, 0xa9, 0xd3, 0x16, 0x58, 0xd4, 0x43, 0xf8, 0x9f, - 0x14, 0xea, 
0x84, 0x66, 0x59, 0x6c, 0xa5, 0xb0, 0x50, 0x06, 0xd9, 0xfc, 0xd9, 0x00, 0xc7, 0xc6, - 0x47, 0xcd, 0x58, 0xc8, 0xc6, 0xbe, 0x42, 0xbe, 0x03, 0xa0, 0x4a, 0x78, 0x6d, 0x40, 0x42, 0xdc, - 0x25, 0x2a, 0xf0, 0xd2, 0x81, 0x02, 0x8f, 0x9f, 0xcd, 0x1b, 0x39, 0x44, 0x34, 0x81, 0xc5, 0xfc, - 0x25, 0x9b, 0x84, 0xea, 0xf6, 0x41, 0x92, 0xb8, 0x0b, 0x8e, 0xeb, 0xea, 0x1c, 0x42, 0x16, 0x67, - 0x34, 0xd9, 0xf1, 0x56, 0x1e, 0x12, 0x4d, 0xe2, 0x31, 0x7f, 0x2c, 0x81, 0x13, 0x93, 0x46, 0x32, - 0x6c, 0xeb, 0x4f, 0x62, 0x95, 0xc5, 0xa5, 0xf4, 0x27, 0xf1, 0xee, 0xb0, 0x7e, 0x7e, 0xcf, 0x6f, - 0x9c, 0x08, 0x30, 0xf5, 0xfd, 0xfc, 0x21, 0xa8, 0x66, 0xaa, 0xf8, 0x01, 0xa7, 0x3d, 0x7a, 0x47, - 0xbd, 0xc4, 0xd4, 0x23, 0xf4, 0x99, 0xd1, 0xb0, 0x5e, 0xdd, 0x28, 0xb0, 0x41, 0x85, 0xde, 0xe2, - 0xc3, 0x69, 0x82, 0x0a, 0x0e, 0x26, 0xdf, 0x93, 0x53, 0x28, 0xe0, 0xa7, 0x7c, 0xe5, 0x94, 0x0a, - 0x0e, 0xb9, 0x72, 0x1f, 0x83, 0xd3, 0xd9, 0xc6, 0xe5, 0x4b, 0x77, 0x76, 0x34, 0xac, 0x9f, 0x6e, - 0x15, 0x19, 0xa1, 0x62, 0xff, 0x22, 0xf5, 0xcd, 0x3e, 0x1d, 0xf5, 0x35, 0xad, 0x7b, 0x0f, 0x6b, - 0x33, 0xf7, 0x1f, 0xd6, 0x66, 0x1e, 0x3c, 0xac, 0xcd, 0x7c, 0x39, 0xaa, 0x19, 0xf7, 0x46, 0x35, - 0xe3, 0xfe, 0xa8, 0x66, 0x3c, 0x18, 0xd5, 0x8c, 0xdf, 0x46, 0x35, 0xe3, 0xdb, 0x47, 0xb5, 0x99, - 0x8f, 0x16, 0xa2, 0x61, 0xf8, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xa9, 0x91, 0xe9, 0xfe, - 0x13, 0x00, 0x00, + 0x14, 0xce, 0x3a, 0x4e, 0x1a, 0xc6, 0x69, 0x52, 0xa6, 0x55, 0xeb, 0xa6, 0xd4, 0x8e, 0x56, 0x08, + 0xb5, 0x08, 0x76, 0xa9, 0x29, 0x08, 0x84, 0x00, 0xc5, 0xe6, 0xd2, 0x8a, 0xb8, 0x97, 0x69, 0xa8, + 0x10, 0x20, 0xc1, 0x64, 0x3d, 0x75, 0x86, 0x78, 0x2f, 0xda, 0x19, 0x5b, 0xa4, 0x52, 0x25, 0x5e, + 0x78, 0x43, 0x82, 0x17, 0x7e, 0x02, 0x12, 0xff, 0x80, 0x67, 0x90, 0x90, 0xfa, 0xd8, 0xc7, 0xf2, + 0x62, 0x51, 0xf7, 0x8d, 0x9f, 0x50, 0x89, 0x8b, 0xe6, 0xb2, 0x37, 0xaf, 0xd7, 0xad, 0x43, 0x5a, + 0xc1, 0x9b, 0x3d, 0x73, 0xce, 0xf7, 0x9d, 0xcb, 0x37, 0x67, 0x66, 0xc1, 0xdb, 0xbb, 0xaf, 0x31, + 0x8b, 0xfa, 0xf6, 0x6e, 0x7f, 
0x9b, 0x84, 0x1e, 0xe1, 0x84, 0xd9, 0xc1, 0x6e, 0xd7, 0xc6, 0x01, + 0x65, 0x36, 0xee, 0x73, 0x9f, 0x39, 0xb8, 0x47, 0xbd, 0xae, 0x3d, 0x68, 0xe0, 0x5e, 0xb0, 0x83, + 0xcf, 0xd9, 0x5d, 0xe2, 0x91, 0x10, 0x73, 0xd2, 0xb1, 0x82, 0xd0, 0xe7, 0x3e, 0xb4, 0x15, 0x80, + 0x95, 0x00, 0x58, 0xc1, 0x6e, 0xd7, 0x12, 0x00, 0x56, 0x0a, 0xc0, 0x8a, 0x00, 0xd6, 0x5e, 0xec, + 0x52, 0xbe, 0xd3, 0xdf, 0xb6, 0x1c, 0xdf, 0xb5, 0xbb, 0x7e, 0xd7, 0xb7, 0x25, 0xce, 0x76, 0xff, + 0x86, 0xfc, 0x27, 0xff, 0xc8, 0x5f, 0x0a, 0x7f, 0xed, 0xbc, 0x0e, 0x10, 0x07, 0xd4, 0xc5, 0xce, + 0x0e, 0xf5, 0x48, 0xb8, 0x17, 0x85, 0x68, 0x87, 0x84, 0xf9, 0xfd, 0xd0, 0x21, 0xe3, 0x51, 0x4d, + 0xf5, 0x62, 0xb6, 0x4b, 0x38, 0xb6, 0x07, 0xb9, 0x5c, 0xd6, 0xec, 0x22, 0xaf, 0xb0, 0xef, 0x71, + 0xea, 0xe6, 0x69, 0x5e, 0x7d, 0x98, 0x03, 0x73, 0x76, 0x88, 0x8b, 0x73, 0x7e, 0x2f, 0x17, 0xf9, + 0xf5, 0x39, 0xed, 0xd9, 0xd4, 0xe3, 0x8c, 0x87, 0x39, 0xa7, 0x17, 0x0a, 0x5b, 0x35, 0x21, 0x17, + 0xf3, 0x7b, 0x03, 0x9c, 0x6a, 0x85, 0x3e, 0x63, 0xd7, 0x49, 0xc8, 0xa8, 0xef, 0x5d, 0xde, 0xfe, + 0x82, 0x38, 0x1c, 0x91, 0x1b, 0x24, 0x24, 0x9e, 0x43, 0xe0, 0x3a, 0x28, 0xef, 0x52, 0xaf, 0x53, + 0x35, 0xd6, 0x8d, 0x33, 0x4f, 0x35, 0x97, 0x6f, 0x0f, 0xeb, 0x73, 0xa3, 0x61, 0xbd, 0xfc, 0x01, + 0xf5, 0x3a, 0x48, 0xee, 0x08, 0x0b, 0x0f, 0xbb, 0xa4, 0x5a, 0xca, 0x5a, 0x5c, 0xc2, 0x2e, 0x41, + 0x72, 0x07, 0x36, 0x00, 0xc0, 0x01, 0xd5, 0x04, 0xd5, 0x79, 0x69, 0x07, 0xb5, 0x1d, 0xd8, 0xb8, + 0x72, 0x51, 0xef, 0xa0, 0x94, 0x95, 0x79, 0xbf, 0x04, 0x4e, 0x5c, 0xf0, 0x43, 0x7a, 0xd3, 0xf7, + 0x38, 0xee, 0x5d, 0xf1, 0x3b, 0x1b, 0x5a, 0x24, 0x24, 0x84, 0x9f, 0x83, 0x25, 0xd1, 0x9a, 0x0e, + 0xe6, 0x58, 0xc6, 0x55, 0x69, 0xbc, 0x64, 0x69, 0x79, 0xa5, 0x2b, 0x95, 0x08, 0x4c, 0x58, 0x5b, + 0x83, 0x73, 0x96, 0x4a, 0xae, 0x4d, 0x38, 0x4e, 0xf8, 0x93, 0x35, 0x14, 0xa3, 0x42, 0x0f, 0x94, + 0x59, 0x40, 0x1c, 0x99, 0x53, 0xa5, 0xb1, 0x69, 0xcd, 0x28, 0x5e, 0xab, 0x20, 0xf2, 0x6b, 0x01, + 0x71, 0x92, 0x0a, 0x89, 0x7f, 0x48, 0xf2, 0xc0, 0x01, 0x58, 0x64, 
0x1c, 0xf3, 0x3e, 0x93, 0xd5, + 0xa9, 0x34, 0x2e, 0x1d, 0x18, 0xa3, 0x44, 0x6d, 0xae, 0x68, 0xce, 0x45, 0xf5, 0x1f, 0x69, 0x36, + 0xf3, 0xdb, 0x79, 0xb0, 0x5e, 0xe0, 0xd9, 0xf2, 0xbd, 0x0e, 0xe5, 0xd4, 0xf7, 0xe0, 0x05, 0x50, + 0xe6, 0x7b, 0x01, 0xd1, 0x12, 0x38, 0x1f, 0x85, 0xbf, 0xb5, 0x17, 0x90, 0x07, 0xc3, 0xfa, 0xb3, + 0x0f, 0xf3, 0x17, 0x76, 0x48, 0x22, 0xc0, 0xeb, 0x71, 0x9a, 0x4a, 0x2c, 0x6f, 0x65, 0xc3, 0x7a, + 0x30, 0xac, 0x4f, 0x15, 0xaf, 0x15, 0x63, 0x66, 0xd3, 0x80, 0x03, 0x00, 0x7b, 0x98, 0xf1, 0xad, + 0x10, 0x7b, 0x4c, 0x71, 0x52, 0x97, 0xe8, 0x52, 0x3e, 0xff, 0x68, 0xd2, 0x10, 0x1e, 0xcd, 0x35, + 0x1d, 0x0f, 0xdc, 0xcc, 0xa1, 0xa1, 0x09, 0x0c, 0xf0, 0x39, 0xb0, 0x18, 0x12, 0xcc, 0x7c, 0xaf, + 0x5a, 0x96, 0xf9, 0xc4, 0x65, 0x46, 0x72, 0x15, 0xe9, 0x5d, 0x78, 0x16, 0x1c, 0x72, 0x09, 0x63, + 0xb8, 0x4b, 0xaa, 0x0b, 0xd2, 0x70, 0x55, 0x1b, 0x1e, 0x6a, 0xab, 0x65, 0x14, 0xed, 0x9b, 0x7f, + 0x18, 0xe0, 0x54, 0x41, 0x45, 0x37, 0x29, 0xe3, 0xf0, 0xd3, 0x9c, 0xf6, 0xad, 0x47, 0x4b, 0x50, + 0x78, 0x4b, 0xe5, 0x1f, 0xd1, 0xdc, 0x4b, 0xd1, 0x4a, 0x4a, 0xf7, 0x2e, 0x58, 0xa0, 0x9c, 0xb8, + 0xa2, 0x3f, 0xf3, 0x67, 0x2a, 0x8d, 0x0b, 0x07, 0x25, 0xc3, 0xe6, 0x61, 0x4d, 0xba, 0x70, 0x51, + 0xc0, 0x23, 0xc5, 0x62, 0xfe, 0x55, 0x2a, 0x4c, 0x56, 0x1c, 0x0e, 0xf8, 0x8d, 0x01, 0x56, 0xe4, + 0xdf, 0x2d, 0x1c, 0x76, 0x89, 0x98, 0x4a, 0x3a, 0xe7, 0xd9, 0x4f, 0xe4, 0x94, 0x19, 0xd7, 0x3c, + 0xae, 0x83, 0x5b, 0xb9, 0x96, 0xe1, 0x42, 0x63, 0xdc, 0xf0, 0x1c, 0xa8, 0xb8, 0xd4, 0x43, 0x24, + 0xe8, 0x51, 0x07, 0x2b, 0x0d, 0x2f, 0x34, 0x57, 0x47, 0xc3, 0x7a, 0xa5, 0x9d, 0x2c, 0xa3, 0xb4, + 0x0d, 0x7c, 0x05, 0x54, 0x5c, 0xfc, 0x65, 0xec, 0x32, 0x2f, 0x5d, 0x8e, 0x6a, 0xbe, 0x4a, 0x3b, + 0xd9, 0x42, 0x69, 0x3b, 0x78, 0x43, 0x08, 0x86, 0x87, 0xd4, 0x61, 0xd5, 0xb2, 0xec, 0xc4, 0x1b, + 0x33, 0x27, 0xdc, 0x96, 0xfe, 0x72, 0xe2, 0xa4, 0xd4, 0x26, 0x31, 0x51, 0x04, 0x6e, 0xfe, 0x56, + 0x06, 0xa7, 0xa7, 0x4e, 0x0e, 0xf8, 0x1e, 0x80, 0xfe, 0x36, 0x23, 0xe1, 0x80, 0x74, 0xde, 0x57, + 0x57, 
0x87, 0x98, 0xe1, 0xa2, 0x0b, 0xf3, 0xcd, 0xe3, 0xe2, 0xa8, 0x5c, 0xce, 0xed, 0xa2, 0x09, + 0x1e, 0xd0, 0x01, 0x87, 0xc5, 0x01, 0x52, 0x15, 0xa6, 0xfa, 0xba, 0x98, 0xed, 0x74, 0x3e, 0x3d, + 0x1a, 0xd6, 0x0f, 0x6f, 0xa6, 0x41, 0x50, 0x16, 0x13, 0x6e, 0x80, 0x55, 0xa7, 0x1f, 0x86, 0xc4, + 0xe3, 0x63, 0x15, 0x3f, 0xa1, 0x2b, 0xb0, 0xda, 0xca, 0x6e, 0xa3, 0x71, 0x7b, 0x01, 0xd1, 0x21, + 0x8c, 0x86, 0xa4, 0x13, 0x43, 0x94, 0xb3, 0x10, 0xef, 0x64, 0xb7, 0xd1, 0xb8, 0x3d, 0xbc, 0x05, + 0x56, 0x34, 0xaa, 0xae, 0x77, 0x75, 0x41, 0xf6, 0xf0, 0xcd, 0xfd, 0xf6, 0x50, 0xcd, 0xf0, 0x58, + 0xa5, 0xad, 0x0c, 0x38, 0x1a, 0x23, 0x83, 0x5f, 0x1b, 0x00, 0x38, 0xd1, 0xa0, 0x64, 0xd5, 0x45, + 0xc9, 0x7d, 0xf5, 0xa0, 0x4e, 0x72, 0x3c, 0x82, 0x93, 0x1b, 0x34, 0x5e, 0x62, 0x28, 0x45, 0x6c, + 0xfe, 0x59, 0x02, 0x20, 0x11, 0x21, 0x3c, 0x9f, 0xb9, 0x45, 0xd6, 0xc7, 0x6e, 0x91, 0x23, 0xda, + 0x52, 0x3e, 0xd3, 0x52, 0x37, 0x46, 0x17, 0x2c, 0xfa, 0xf2, 0xb4, 0x6a, 0xbd, 0xb4, 0x66, 0xce, + 0x23, 0xbe, 0xdf, 0x63, 0xf8, 0x26, 0x10, 0x23, 0x5a, 0x0f, 0x01, 0x0d, 0x0f, 0x3f, 0x03, 0xe5, + 0xc0, 0xef, 0x44, 0xf7, 0xef, 0xc6, 0xcc, 0x34, 0x57, 0xfc, 0x0e, 0xcb, 0x90, 0x2c, 0x89, 0xec, + 0xc4, 0x2a, 0x92, 0xc0, 0xd0, 0x07, 0x4b, 0xd1, 0x33, 0x54, 0x2a, 0xaa, 0xd2, 0x78, 0x77, 0x66, + 0x12, 0xa4, 0x01, 0x32, 0x44, 0xcb, 0x62, 0x96, 0x47, 0x3b, 0x28, 0x26, 0x31, 0xff, 0x2e, 0x81, + 0xe5, 0xb4, 0x80, 0xfe, 0x1b, 0x1d, 0x50, 0x5a, 0x7e, 0xcc, 0x1d, 0x50, 0x24, 0x4f, 0xa0, 0x03, + 0x8a, 0xa8, 0xa8, 0x03, 0x3f, 0x94, 0x00, 0xcc, 0xcb, 0x0f, 0x72, 0xb0, 0xc8, 0xe5, 0x9d, 0xf2, + 0x58, 0x2e, 0xb3, 0xf8, 0x0d, 0xa2, 0xef, 0x2d, 0xcd, 0x25, 0x1e, 0xe1, 0x6a, 0xea, 0x5f, 0x4a, + 0x1e, 0xeb, 0xf1, 0x11, 0x6e, 0xc7, 0x3b, 0x28, 0x65, 0x05, 0x09, 0xa8, 0x28, 0xef, 0xeb, 0xb8, + 0xd7, 0x8f, 0x1e, 0x54, 0x53, 0xdf, 0x1b, 0x56, 0x94, 0xbc, 0x75, 0xb5, 0x8f, 0x3d, 0x4e, 0xf9, + 0x5e, 0x72, 0xdb, 0x6d, 0x25, 0x50, 0x28, 0x8d, 0x6b, 0xfe, 0x38, 0x5e, 0x27, 0xa5, 0xd7, 0xff, + 0x4f, 0x9d, 0x76, 0xc0, 0xb2, 0x1e, 0xc2, 
0xff, 0xa6, 0x50, 0xc7, 0x34, 0xcb, 0x72, 0x2b, 0x85, + 0x85, 0x32, 0xc8, 0xe6, 0x2f, 0x06, 0x38, 0x32, 0x3e, 0x6a, 0xc6, 0x42, 0x36, 0x1e, 0x29, 0xe4, + 0x9b, 0x00, 0xaa, 0x84, 0x37, 0x06, 0x24, 0xc4, 0x5d, 0xa2, 0x02, 0x2f, 0xed, 0x2b, 0xf0, 0xf8, + 0xd9, 0xbc, 0x95, 0x43, 0x44, 0x13, 0x58, 0xcc, 0x5f, 0xb3, 0x49, 0xa8, 0x6e, 0xef, 0x27, 0x89, + 0x5b, 0xe0, 0xa8, 0xae, 0xce, 0x01, 0x64, 0x71, 0x4a, 0x93, 0x1d, 0x6d, 0xe5, 0x21, 0xd1, 0x24, + 0x1e, 0xf3, 0xa7, 0x12, 0x38, 0x36, 0x69, 0x24, 0xc3, 0xb6, 0xfe, 0x24, 0x56, 0x59, 0xbc, 0x9e, + 0xfe, 0x24, 0x7e, 0x30, 0xac, 0x9f, 0x9d, 0xfa, 0x8d, 0x13, 0x01, 0xa6, 0xbe, 0x9f, 0x3f, 0x02, + 0xd5, 0x4c, 0x15, 0x3f, 0xe4, 0xb4, 0x47, 0x6f, 0xaa, 0x97, 0x98, 0x7a, 0x84, 0x3e, 0x33, 0x1a, + 0xd6, 0xab, 0x5b, 0x05, 0x36, 0xa8, 0xd0, 0x5b, 0x7c, 0x38, 0x4d, 0x50, 0xc1, 0xfe, 0xe4, 0x7b, + 0x7c, 0x06, 0x05, 0xfc, 0x9c, 0xaf, 0x9c, 0x52, 0xc1, 0x01, 0x57, 0xee, 0x13, 0x70, 0x32, 0xdb, + 0xb8, 0x7c, 0xe9, 0x4e, 0x8f, 0x86, 0xf5, 0x93, 0xad, 0x22, 0x23, 0x54, 0xec, 0x5f, 0xa4, 0xbe, + 0xf9, 0x27, 0xa3, 0xbe, 0xa6, 0x75, 0xfb, 0x5e, 0x6d, 0xee, 0xce, 0xbd, 0xda, 0xdc, 0xdd, 0x7b, + 0xb5, 0xb9, 0xaf, 0x46, 0x35, 0xe3, 0xf6, 0xa8, 0x66, 0xdc, 0x19, 0xd5, 0x8c, 0xbb, 0xa3, 0x9a, + 0xf1, 0xfb, 0xa8, 0x66, 0x7c, 0x77, 0xbf, 0x36, 0xf7, 0xf1, 0x52, 0x34, 0x0c, 0xff, 0x09, 0x00, + 0x00, 0xff, 0xff, 0x88, 0x5a, 0x1f, 0xc3, 0xc3, 0x13, 0x00, 0x00, } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto index 21d9ea6f62b1..2f84faf16148 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto @@ -27,7 +27,6 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import 
"k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v2alpha1"; diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util/util.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util/util.go index 356b295a3e18..389e145e8496 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util/util.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/util/util.go @@ -84,6 +84,15 @@ func FileExists(filename string) (bool, error) { return true, nil } +func FileOrSymlinkExists(filename string) (bool, error) { + if _, err := os.Lstat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + // ReadDirNoStat returns a string of files/directories contained // in dirname without calling lstat on them. func ReadDirNoStat(dirname string) ([]string, error) { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go index 5dbc70eaf12d..311b63d01cd7 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/pkg/version/base.go @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. 
- gitVersion string = "v1.7.0+$Format:%h$" + gitVersion string = "v1.7.6+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD index 0c8f30954962..00c96d666d35 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD @@ -13,7 +13,7 @@ go_test( srcs = ["azure_test.go"], library = ":go_default_library", tags = ["automanaged"], - deps = ["//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library"], + deps = ["//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library"], ) go_library( @@ -22,6 +22,7 @@ go_library( tags = ["automanaged"], deps = [ "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go index 342fbb78f572..06744e742445 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go @@ -24,6 +24,7 @@ import ( "sync" "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" "github.com/golang/glog" @@ 
-137,7 +138,7 @@ func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) } type azureToken struct { - token azure.Token + token adal.Token clientID string tenantID string apiserverID string @@ -234,7 +235,7 @@ func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) { } return &azureToken{ - token: azure.Token{ + token: adal.Token{ AccessToken: accessToken, RefreshToken: refreshToken, ExpiresIn: expiresIn, @@ -268,15 +269,15 @@ func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error { } func (ts *azureTokenSource) refreshToken(token *azureToken) (*azureToken, error) { - oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(token.tenantID) + oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, token.tenantID) if err != nil { return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err) } - callback := func(t azure.Token) error { + callback := func(t adal.Token) error { return nil } - spt, err := azure.NewServicePrincipalTokenFromManualToken( + spt, err := adal.NewServicePrincipalTokenFromManualToken( *oauthConfig, token.clientID, token.apiserverID, @@ -324,12 +325,12 @@ func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID strin } func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) { - oauthConfig, err := ts.environment.OAuthConfigForTenant(ts.tenantID) + oauthConfig, err := adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID) if err != nil { return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err) } client := &autorest.Client{} - deviceCode, err := azure.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID) + deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID) if err != nil { return nil, fmt.Errorf("initialing the device code authentication: %v", err) } @@ -339,7 +340,7 @@ func (ts 
*azureTokenSourceDeviceCode) Token() (*azureToken, error) { return nil, fmt.Errorf("prompting the device code message: %v", err) } - token, err := azure.WaitForUserCompletion(client, deviceCode) + token, err := adal.WaitForUserCompletion(client, deviceCode) if err != nil { return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go index 78d28b6e2fc0..b420712bc122 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/adal" ) func TestAzureTokenSource(t *testing.T) { @@ -120,8 +120,8 @@ func token2Cfg(token *azureToken) map[string]string { return cfg } -func newFackeAzureToken(accessToken string, expiresOn string) azure.Token { - return azure.Token{ +func newFackeAzureToken(accessToken string, expiresOn string) adal.Token { + return adal.Token{ AccessToken: accessToken, RefreshToken: "fake", ExpiresIn: "3600", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/test_apis/README b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/test_apis/README new file mode 100644 index 000000000000..454887a72a7f --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/client-gen/test_apis/README @@ -0,0 +1,4 @@ +This dir can not be named "testdata" because of the way ugorji gnerates code. +Specifically, it emits a .go file and then calls `go run` on it. Because +"testdata" is a special name to Go, it decides NOT to find the vendor dir, and +therefore fails to compile. 
Just name it something else. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore new file mode 100644 index 000000000000..0e9aa466bba2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/.gitignore @@ -0,0 +1 @@ +go-to-protobuf diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/import-boss/.gitignore b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/import-boss/.gitignore new file mode 100644 index 000000000000..a5c47b66f833 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/import-boss/.gitignore @@ -0,0 +1 @@ +import-boss diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/lister-gen/.import-restrictions b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/lister-gen/.import-restrictions new file mode 100644 index 000000000000..0967ef424bce --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/lister-gen/.import-restrictions @@ -0,0 +1 @@ +{} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/README b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/README new file mode 100644 index 000000000000..01f33350c07e --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/openapi-gen/README @@ -0,0 +1,12 @@ +# Generate OpenAPI definitions + +- To generate definition for a specific type or package add "+k8s:openapi-gen=true" tag to the type/package comment lines. +- To exclude a type or a member from a tagged package/type, add "+k8s:openapi-gen=false" tag to the comment lines. + +# OpenAPI Extensions +OpenAPI spec can have extensions on types. 
To define one or more extensions on a type or its member +add "+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE" to the comment lines before type/member. A type/member can +have multiple extensions. The rest of the line in the comment will be used as $VALUE so there is no need to +escape or quote the value string. Extensions can be use to pass more information to client generators or +documentation generators. For example a type my have a friendly name to be displayed in documentation or +being used in a client's fluent interface. diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/set-gen/.gitignore b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/set-gen/.gitignore new file mode 100644 index 000000000000..ffe6458c963c --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/set-gen/.gitignore @@ -0,0 +1 @@ +set-gen diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/artifacts/self-contained/etcd-pod.yaml b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/artifacts/self-contained/etcd-pod.yaml index 2d604a25501d..7c596647e0b0 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/artifacts/self-contained/etcd-pod.yaml +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/artifacts/self-contained/etcd-pod.yaml @@ -47,4 +47,3 @@ spec: defaultMode: 420 name: etcd-ca name: volume-etcd-ca - diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go index 1f6790b32e34..5e36e7db26e6 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go @@ -20,6 +20,7 @@ import ( "sort" "strings" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/runtime/schema" ) @@ -85,6 +86,17 @@ func APIServiceNameToGroupVersion(apiServiceName string) schema.GroupVersion { return schema.GroupVersion{Group: tokens[1], Version: tokens[0]} } +// NewLocalAvailableAPIServiceCondition returns a condition for an available local APIService. +func NewLocalAvailableAPIServiceCondition() APIServiceCondition { + return APIServiceCondition{ + Type: Available, + Status: ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: "Local", + Message: "Local APIServices are always available", + } +} + // SetAPIServiceCondition sets the status condition. It either overwrites the existing one or // creates a new one func SetAPIServiceCondition(apiService *APIService, newCondition APIServiceCondition) { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto index 10b7b2799ba7..9146d92c2f36 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -123,4 +123,3 @@ message ServiceReference { // Name is the name of the service optional string name = 2; } - diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go index 850c42d580b3..4b1049385563 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build 
!ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go index c15c11d9ce55..3388bcef4887 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go @@ -1,4 +1,20 @@ -// +build !ignore_autogenerated_openshift +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD index bea2e5baf350..3fc28a787881 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -17,6 +17,7 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ + "//vendor/golang.org/x/net/websocket:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy_test.go index a11ff441ccf4..11e4c8fa8bc1 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy_test.go @@ -129,7 +129,34 @@ func TestProxyHandler(t *testing.T) { expectedStatusCode: http.StatusInternalServerError, expectedBody: "missing user", }, - "proxy with user": { + "proxy with user, insecure": { + user: &user.DefaultInfo{ + Name: "username", + Groups: []string{"one", "two"}, + }, + path: "/request/path", + apiService: &apiregistration.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: "v1.foo"}, + Spec: apiregistration.APIServiceSpec{ + Service: &apiregistration.ServiceReference{}, + Group: "foo", + Version: "v1", + InsecureSkipTLSVerify: true, + }, + }, + expectedStatusCode: http.StatusOK, + expectedCalled: true, + expectedHeaders: map[string][]string{ + "X-Forwarded-Proto": {"https"}, + "X-Forwarded-Uri": {"/request/path"}, + "X-Forwarded-For": {"127.0.0.1"}, + "X-Remote-User": {"username"}, 
+ "User-Agent": {"Go-http-client/1.1"}, + "Accept-Encoding": {"gzip"}, + "X-Remote-Group": {"one", "two"}, + }, + }, + "proxy with user, cabundle": { user: &user.DefaultInfo{ Name: "username", Groups: []string{"one", "two"}, @@ -229,7 +256,6 @@ func TestProxyHandler(t *testing.T) { } func TestProxyUpgrade(t *testing.T) { - testcases := map[string]struct { APIService *apiregistration.APIService ExpectError bool diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/BUILD new file mode 100644 index 000000000000..cab894cb7846 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/BUILD @@ -0,0 +1,24 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "clientset.go", + "doc.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/client-go/discovery:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", + "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1:go_default_library", + ], +) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD index 389bb2626009..ca2525555dcc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD @@ -32,6 +32,7 @@ go_library( 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/tools/cache:go_default_library", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index f606d51c2452..e9f7e0739d2d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" @@ -40,6 +41,11 @@ import ( const ( AutoRegisterManagedLabel = "kube-aggregator.kubernetes.io/automanaged" + + // manageOnStart is a value for the AutoRegisterManagedLabel that indicates the APIService wants to be synced one time when the controller starts. + manageOnStart = "onstart" + // manageContinuously is a value for the AutoRegisterManagedLabel that indicates the APIService wants to be synced continuously. + manageContinuously = "true" ) var ( @@ -49,7 +55,9 @@ var ( // AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for // adding and removing APIServices type AutoAPIServiceRegistration interface { - // AddAPIServiceToSync adds an API service to auto-register. 
+ // AddAPIServiceToSyncOnStart adds an API service to sync on start. + AddAPIServiceToSyncOnStart(in *apiregistration.APIService) + // AddAPIServiceToSync adds an API service to sync continuously. AddAPIServiceToSync(in *apiregistration.APIService) // RemoveAPIServiceToSync removes an API service to auto-register. RemoveAPIServiceToSync(name string) @@ -67,6 +75,13 @@ type autoRegisterController struct { syncHandler func(apiServiceName string) error + // track which services we have synced + syncedSuccessfullyLock *sync.RWMutex + syncedSuccessfully map[string]bool + + // remember names of services that existed when we started + apiServicesAtStart map[string]bool + // queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors queue workqueue.RateLimitingInterface } @@ -77,7 +92,13 @@ func NewAutoRegisterController(apiServiceInformer informers.APIServiceInformer, apiServiceSynced: apiServiceInformer.Informer().HasSynced, apiServiceClient: apiServiceClient, apiServicesToSync: map[string]*apiregistration.APIService{}, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"), + + apiServicesAtStart: map[string]bool{}, + + syncedSuccessfullyLock: &sync.RWMutex{}, + syncedSuccessfully: map[string]bool{}, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"), } c.syncHandler = c.checkAPIService @@ -125,6 +146,13 @@ func (c *autoRegisterController) Run(threadiness int, stopCh <-chan struct{}) { return } + // record APIService objects that existed when we started + if services, err := c.apiServiceLister.List(labels.Everything()); err == nil { + for _, service := range services { + c.apiServicesAtStart[service.Name] = true + } + } + // start up your worker threads based on threadiness. Some controllers have multiple kinds of workers for i := 0; i < threadiness; i++ { // runWorker will loop until "something bad" happens. 
The .Until will then rekick the worker @@ -174,29 +202,61 @@ func (c *autoRegisterController) processNextWorkItem() bool { return true } -func (c *autoRegisterController) checkAPIService(name string) error { +// checkAPIService syncs the current APIService against a list of desired APIService objects +// +// | A. desired: not found | B. desired: sync on start | C. desired: sync always +// ------------------------------------------------|-----------------------|---------------------------|------------------------ +// 1. current: lookup error | error | error | error +// 2. current: not found | - | create once | create +// 3. current: no sync | - | - | - +// 4. current: sync on start, not present at start | - | - | - +// 5. current: sync on start, present at start | delete once | update once | update once +// 6. current: sync always | delete | update once | update +func (c *autoRegisterController) checkAPIService(name string) (err error) { desired := c.GetAPIServiceToSync(name) curr, err := c.apiServiceLister.Get(name) + // if we've never synced this service successfully, record a successful sync. 
+ hasSynced := c.hasSyncedSuccessfully(name) + if !hasSynced { + defer func() { + if err == nil { + c.setSyncedSuccessfully(name) + } + }() + } + switch { - // we had a real error, just return it + // we had a real error, just return it (1A,1B,1C) case err != nil && !apierrors.IsNotFound(err): return err - // we don't have an entry and we don't want one + // we don't have an entry and we don't want one (2A) case apierrors.IsNotFound(err) && desired == nil: return nil - // we don't have an entry and we do want one + // the local object only wants to sync on start and has already synced (2B,5B,6B "once" enforcement) + case isAutomanagedOnStart(desired) && hasSynced: + return nil + + // we don't have an entry and we do want one (2B,2C) case apierrors.IsNotFound(err) && desired != nil: _, err := c.apiServiceClient.APIServices().Create(desired) return err - // we aren't trying to manage this APIService. If the user removes the label, he's taken over management himself - case curr.Labels[AutoRegisterManagedLabel] != "true": + // we aren't trying to manage this APIService (3A,3B,3C) + case !isAutomanaged(curr): + return nil + + // the remote object only wants to sync on start, but was added after we started (4A,4B,4C) + case isAutomanagedOnStart(curr) && !c.apiServicesAtStart[name]: + return nil + + // the remote object only wants to sync on start and has already synced (5A,5B,5C "once" enforcement) + case isAutomanagedOnStart(curr) && hasSynced: return nil - // we have a spurious APIService that we're managing, delete it + // we have a spurious APIService that we're managing, delete it (5A,6A) case desired == nil: return c.apiServiceClient.APIServices().Delete(curr.Name, nil) @@ -205,7 +265,7 @@ func (c *autoRegisterController) checkAPIService(name string) error { return nil } - // we have an entry and we have a desired, now we deconflict. Only a few fields matter. + // we have an entry and we have a desired, now we deconflict. Only a few fields matter. 
(5B,5C,6B,6C) apiService := &apiregistration.APIService{} if err := apiregistration.DeepCopy_apiregistration_APIService(curr, apiService, cloner); err != nil { return err @@ -222,7 +282,15 @@ func (c *autoRegisterController) GetAPIServiceToSync(name string) *apiregistrati return c.apiServicesToSync[name] } +func (c *autoRegisterController) AddAPIServiceToSyncOnStart(in *apiregistration.APIService) { + c.addAPIServiceToSync(in, manageOnStart) +} + func (c *autoRegisterController) AddAPIServiceToSync(in *apiregistration.APIService) { + c.addAPIServiceToSync(in, manageContinuously) +} + +func (c *autoRegisterController) addAPIServiceToSync(in *apiregistration.APIService, syncType string) { c.apiServicesToSyncLock.Lock() defer c.apiServicesToSyncLock.Unlock() @@ -235,7 +303,7 @@ func (c *autoRegisterController) AddAPIServiceToSync(in *apiregistration.APIServ if apiService.Labels == nil { apiService.Labels = map[string]string{} } - apiService.Labels[AutoRegisterManagedLabel] = "true" + apiService.Labels[AutoRegisterManagedLabel] = syncType c.apiServicesToSync[apiService.Name] = apiService c.queue.Add(apiService.Name) @@ -248,3 +316,31 @@ func (c *autoRegisterController) RemoveAPIServiceToSync(name string) { delete(c.apiServicesToSync, name) c.queue.Add(name) } + +func (c *autoRegisterController) hasSyncedSuccessfully(name string) bool { + c.syncedSuccessfullyLock.RLock() + defer c.syncedSuccessfullyLock.RUnlock() + return c.syncedSuccessfully[name] +} + +func (c *autoRegisterController) setSyncedSuccessfully(name string) { + c.syncedSuccessfullyLock.Lock() + defer c.syncedSuccessfullyLock.Unlock() + c.syncedSuccessfully[name] = true +} + +func automanagedType(service *apiregistration.APIService) string { + if service == nil { + return "" + } + return service.Labels[AutoRegisterManagedLabel] +} + +func isAutomanagedOnStart(service *apiregistration.APIService) bool { + return automanagedType(service) == manageOnStart +} + +func isAutomanaged(service 
*apiregistration.APIService) bool { + managedType := automanagedType(service) + return managedType == manageOnStart || managedType == manageContinuously +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go index 9a6181f83612..0fd469810239 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller_test.go @@ -18,6 +18,7 @@ package autoregister import ( "fmt" + "sync" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,6 +36,12 @@ func newAutoRegisterManagedAPIService(name string) *apiregistration.APIService { } } +func newAutoRegisterManagedOnStartAPIService(name string) *apiregistration.APIService { + return &apiregistration.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{AutoRegisterManagedLabel: string("onstart")}}, + } +} + func newAutoRegisterManagedModifiedAPIService(name string) *apiregistration.APIService { return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{AutoRegisterManagedLabel: string("true")}}, @@ -44,6 +51,15 @@ func newAutoRegisterManagedModifiedAPIService(name string) *apiregistration.APIS } } +func newAutoRegisterManagedOnStartModifiedAPIService(name string) *apiregistration.APIService { + return &apiregistration.APIService{ + ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{AutoRegisterManagedLabel: string("onstart")}}, + Spec: apiregistration.APIServiceSpec{ + Group: "something", + }, + } +} + func newAPIService(name string) *apiregistration.APIService { return &apiregistration.APIService{ ObjectMeta: metav1.ObjectMeta{Name: name}, @@ 
-80,6 +96,28 @@ func checkForCreate(name string, client *fake.Clientset) error { return nil } +func checkForCreateOnStart(name string, client *fake.Clientset) error { + if len(client.Actions()) == 0 { + return nil + } + if len(client.Actions()) > 1 { + return fmt.Errorf("unexpected action: %v", client.Actions()) + } + + action := client.Actions()[0] + + createAction, ok := action.(clienttesting.CreateAction) + if !ok { + return fmt.Errorf("unexpected action: %v", client.Actions()) + } + apiService := createAction.GetObject().(*apiregistration.APIService) + if apiService.Name != name || apiService.Labels[AutoRegisterManagedLabel] != "onstart" { + return fmt.Errorf("bad name or label %v", createAction) + } + + return nil +} + func checkForUpdate(name string, client *fake.Clientset) error { if len(client.Actions()) == 0 { return nil @@ -121,13 +159,16 @@ func checkForDelete(name string, client *fake.Clientset) error { func TestSync(t *testing.T) { tests := []struct { - name string - apiServiceName string - addAPIServices []*apiregistration.APIService - updateAPIServices []*apiregistration.APIService - addSyncAPIServices []*apiregistration.APIService - delSyncAPIServices []string - expectedResults func(name string, client *fake.Clientset) error + name string + apiServiceName string + addAPIServices []*apiregistration.APIService + updateAPIServices []*apiregistration.APIService + addSyncAPIServices []*apiregistration.APIService + addSyncOnStartAPIServices []*apiregistration.APIService + delSyncAPIServices []string + alreadySynced map[string]bool + presentAtStart map[string]bool + expectedResults func(name string, client *fake.Clientset) error }{ { name: "adding an API service which isn't auto-managed does nothing", @@ -166,7 +207,7 @@ func TestSync(t *testing.T) { expectedResults: checkForDelete, }, { - name: "removing auto-manged then RemoveAPIService should not touch APIService", + name: "removing auto-managed then RemoveAPIService should not touch APIService", 
apiServiceName: "foo", addAPIServices: []*apiregistration.APIService{}, updateAPIServices: []*apiregistration.APIService{newAPIService("foo")}, @@ -192,17 +233,104 @@ func TestSync(t *testing.T) { delSyncAPIServices: []string{}, expectedResults: checkForUpdate, }, + + { + name: "adding one to auto-register on start should create", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{}, + updateAPIServices: []*apiregistration.APIService{}, + addSyncOnStartAPIServices: []*apiregistration.APIService{newAPIService("foo")}, + delSyncAPIServices: []string{}, + expectedResults: checkForCreateOnStart, + }, + { + name: "adding one to auto-register on start already synced should do nothing", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{}, + updateAPIServices: []*apiregistration.APIService{}, + addSyncOnStartAPIServices: []*apiregistration.APIService{newAPIService("foo")}, + delSyncAPIServices: []string{}, + alreadySynced: map[string]bool{"foo": true}, + expectedResults: checkForNothing, + }, + { + name: "managed onstart apiservice present at start without a matching request should delete", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{newAPIService("foo")}, + updateAPIServices: []*apiregistration.APIService{newAutoRegisterManagedOnStartAPIService("foo")}, + addSyncAPIServices: []*apiregistration.APIService{}, + delSyncAPIServices: []string{}, + presentAtStart: map[string]bool{"foo": true}, + alreadySynced: map[string]bool{}, + expectedResults: checkForDelete, + }, + { + name: "managed onstart apiservice present at start without a matching request already synced once should no-op", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{newAPIService("foo")}, + updateAPIServices: []*apiregistration.APIService{newAutoRegisterManagedOnStartAPIService("foo")}, + addSyncAPIServices: []*apiregistration.APIService{}, + delSyncAPIServices: []string{}, + presentAtStart: map[string]bool{"foo": 
true}, + alreadySynced: map[string]bool{"foo": true}, + expectedResults: checkForNothing, + }, + { + name: "managed onstart apiservice not present at start without a matching request should no-op", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{newAPIService("foo")}, + updateAPIServices: []*apiregistration.APIService{newAutoRegisterManagedOnStartAPIService("foo")}, + addSyncAPIServices: []*apiregistration.APIService{}, + delSyncAPIServices: []string{}, + presentAtStart: map[string]bool{}, + alreadySynced: map[string]bool{}, + expectedResults: checkForNothing, + }, + { + name: "modifying onstart it should result in stomping", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{}, + updateAPIServices: []*apiregistration.APIService{newAutoRegisterManagedModifiedAPIService("foo")}, + addSyncOnStartAPIServices: []*apiregistration.APIService{newAutoRegisterManagedOnStartAPIService("foo")}, + delSyncAPIServices: []string{}, + expectedResults: checkForUpdate, + }, + { + name: "modifying onstart already synced should no-op", + apiServiceName: "foo", + addAPIServices: []*apiregistration.APIService{}, + updateAPIServices: []*apiregistration.APIService{newAutoRegisterManagedModifiedAPIService("foo")}, + addSyncOnStartAPIServices: []*apiregistration.APIService{newAutoRegisterManagedOnStartAPIService("foo")}, + delSyncAPIServices: []string{}, + alreadySynced: map[string]bool{"foo": true}, + expectedResults: checkForNothing, + }, } for _, test := range tests { fakeClient := fake.NewSimpleClientset() apiServiceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - c := autoRegisterController{ + alreadySynced := map[string]bool{} + for k, v := range test.alreadySynced { + alreadySynced[k] = v + } + + presentAtStart := map[string]bool{} + for k, v := range test.presentAtStart { + presentAtStart[k] = v + } + + c := &autoRegisterController{ apiServiceClient: 
fakeClient.Apiregistration(), apiServiceLister: listers.NewAPIServiceLister(apiServiceIndexer), apiServicesToSync: map[string]*apiregistration.APIService{}, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "autoregister"), + + syncedSuccessfullyLock: &sync.RWMutex{}, + syncedSuccessfully: alreadySynced, + + apiServicesAtStart: presentAtStart, } for _, obj := range test.addAPIServices { @@ -217,6 +345,10 @@ func TestSync(t *testing.T) { c.AddAPIServiceToSync(obj) } + for _, obj := range test.addSyncOnStartAPIServices { + c.AddAPIServiceToSyncOnStart(obj) + } + for _, objName := range test.delSyncAPIServices { c.RemoveAPIServiceToSync(objName) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 36fa112b9d27..d7d877737f89 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -140,10 +140,7 @@ func (c *AvailableConditionController) sync(key string) error { // local API services are always considered available if apiService.Spec.Service == nil { - availableCondition.Status = apiregistration.ConditionTrue - availableCondition.Reason = "Local" - availableCondition.Message = "Local APIServices are always available" - apiregistration.SetAPIServiceCondition(apiService, availableCondition) + apiregistration.SetAPIServiceCondition(apiService, apiregistration.NewLocalAvailableAPIServiceCondition()) _, err := c.apiServiceClient.APIServices().UpdateStatus(apiService) return err } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go 
index fdcb6637f766..51887d719377 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/strategy.go @@ -48,6 +48,11 @@ func (apiServerStrategy) NamespaceScoped() bool { func (apiServerStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) { apiservice := obj.(*apiregistration.APIService) apiservice.Status = apiregistration.APIServiceStatus{} + + // mark local API services as immediately available on create + if apiservice.Spec.Service == nil { + apiregistration.SetAPIServiceCondition(apiservice, apiregistration.NewLocalAvailableAPIServiceCondition()) + } } func (apiServerStrategy) PrepareForUpdate(ctx genericapirequest.Context, obj, old runtime.Object) { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh index d10f26a0f269..4204b6df7e0a 100755 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh @@ -52,7 +52,7 @@ function generate_group() { echo "generating clientset for group ${GROUP_NAME} and version ${VERSION} at ${SCRIPT_BASE}/${CLIENT_PKG}" ${clientgen} --input-base ${PREFIX} --input ${INPUT_APIS[@]} --clientset-path ${CLIENT_PKG}/clientset_generated --output-base=${SCRIPT_BASE} ${clientgen} --clientset-name="clientset" --input-base ${PREFIX} --input ${GROUP_NAME}/${VERSION} --clientset-path ${CLIENT_PKG}/clientset_generated --output-base=${SCRIPT_BASE} - + echo "Building lister-gen" go build -o "${listergen}" ${KUBEGEN_PKG}/cmd/lister-gen diff --git a/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_events.go b/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_events.go new file mode 100644 index 000000000000..913ab18e12e0 --- 
/dev/null +++ b/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_events.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "time" + + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" +) + +const ( + // eventsIngestionTimeout is the amount of time to wait until some + // events are ingested. + eventsIngestionTimeout = 10 * time.Minute + + // eventPollingInterval is the delay between attempts to read events + // from the logs provider. + eventPollingInterval = 1 * time.Second + + // eventCreationInterval is the minimal delay between two events + // created for testing purposes. 
+ eventCreationInterval = 10 * time.Second +) + +var _ = framework.KubeDescribe("Cluster level logging using GCL", func() { + f := framework.NewDefaultFramework("gcl-logging-events") + + BeforeEach(func() { + framework.SkipUnlessProviderIs("gce", "gke") + }) + + It("should ingest events", func() { + gclLogsProvider, err := newGclLogsProvider(f) + framework.ExpectNoError(err, "Failed to create GCL logs provider") + + err = gclLogsProvider.Init() + defer gclLogsProvider.Cleanup() + framework.ExpectNoError(err, "Failed to init GCL logs provider") + + stopCh := make(chan struct{}) + successCh := make(chan struct{}) + go func() { + wait.Poll(eventPollingInterval, eventsIngestionTimeout, func() (bool, error) { + events := gclLogsProvider.ReadEvents() + if len(events) > 0 { + framework.Logf("Some events are ingested, sample event: %v", events[0]) + close(successCh) + return true, nil + } + return false, nil + }) + close(stopCh) + }() + + By("Running pods to generate events while waiting for some of them to be ingested") + wait.PollUntil(eventCreationInterval, func() (bool, error) { + podName := "synthlogger" + createLoggingPod(f, podName, "", 1, 1*time.Second) + defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{}) + err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) + if err != nil { + framework.Logf("Failed to wait pod %s to successfully complete due to %v", podName, err) + } + + return false, nil + }, stopCh) + + select { + case <-successCh: + break + default: + framework.Failf("No events are present in Stackdriver after %v", eventsIngestionTimeout) + } + }) +}) diff --git a/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_utils.go b/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_utils.go index e3c50ba8fbe0..d64d9a293c1e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_utils.go +++ b/vendor/k8s.io/kubernetes/test/e2e/cluster-logging/sd_utils.go @@ -40,8 +40,8 @@ const ( // The limit on the number of 
messages to pull from PubSub maxPullLogMessages = 100 * 1000 - // The limit on the number of messages in the cache for a pod - maxCachedMessagesPerPod = 10 * 1000 + // The limit on the number of messages in the single cache + maxCacheSize = 10 * 1000 // PubSub topic with log entries polling interval gclLoggingPollInterval = 100 * time.Millisecond @@ -55,6 +55,7 @@ type gclLogsProvider struct { Subscription *pubsub.Subscription LogSink *gcl.LogSink LogEntryCache map[string]chan logEntry + EventCache chan map[string]interface{} CacheMutex *sync.Mutex PollingStopChannel chan struct{} } @@ -77,6 +78,7 @@ func newGclLogsProvider(f *framework.Framework) (*gclLogsProvider, error) { PubsubService: pubsubService, Framework: f, LogEntryCache: map[string]chan logEntry{}, + EventCache: make(chan map[string]interface{}, maxCacheSize), CacheMutex: &sync.Mutex{}, PollingStopChannel: make(chan struct{}, 1), } @@ -137,7 +139,9 @@ func (gclLogsProvider *gclLogsProvider) createPubSubSubscription(projectId, subs func (gclLogsProvider *gclLogsProvider) createGclLogSink(projectId, nsName, sinkName, topicName string) (*gcl.LogSink, error) { projectDst := fmt.Sprintf("projects/%s", projectId) - filter := fmt.Sprintf("resource.labels.namespace_id=%s AND resource.labels.container_name=%s", nsName, loggingContainerName) + filter := fmt.Sprintf("(resource.type=\"gke_cluster\" AND jsonPayload.kind=\"Event\" AND jsonPayload.metadata.namespace=\"%s\") OR "+ + "(resource.type=\"container\" AND resource.labels.namespace_id=\"%s\")", nsName, nsName) + framework.Logf("Using the following filter for entries: %s", filter) sink := &gcl.LogSink{ Name: sinkName, Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName), @@ -196,9 +200,30 @@ func (gclLogsProvider *gclLogsProvider) pollLogs() { continue } - podName := gclLogEntry.Resource.Labels["pod_id"] - ch := gclLogsProvider.getCacheChannel(podName) - ch <- logEntry{Payload: gclLogEntry.TextPayload} + switch gclLogEntry.Resource.Type { + case 
"container": + podName := gclLogEntry.Resource.Labels["pod_id"] + ch := gclLogsProvider.getCacheChannel(podName) + ch <- logEntry{Payload: gclLogEntry.TextPayload} + break + case "gke_cluster": + jsonPayloadRaw, err := gclLogEntry.JsonPayload.MarshalJSON() + if err != nil { + framework.Logf("Failed to get jsonPayload from LogEntry %v", gclLogEntry) + break + } + var eventObject map[string]interface{} + err = json.Unmarshal(jsonPayloadRaw, &eventObject) + if err != nil { + framework.Logf("Failed to deserialize jsonPayload as json object %s", string(jsonPayloadRaw[:])) + break + } + gclLogsProvider.EventCache <- eventObject + break + default: + framework.Logf("Received LogEntry with unexpected resource type: %s", gclLogEntry.Resource.Type) + break + } } if len(ids) > 0 { @@ -258,6 +283,20 @@ func (logsProvider *gclLogsProvider) FluentdApplicationName() string { return "fluentd-gcp" } +func (gclLogsProvider *gclLogsProvider) ReadEvents() []map[string]interface{} { + var events []map[string]interface{} +polling_loop: + for { + select { + case event := <-gclLogsProvider.EventCache: + events = append(events, event) + default: + break polling_loop + } + } + return events +} + func (gclLogsProvider *gclLogsProvider) getCacheChannel(podName string) chan logEntry { gclLogsProvider.CacheMutex.Lock() defer gclLogsProvider.CacheMutex.Unlock() @@ -266,7 +305,7 @@ func (gclLogsProvider *gclLogsProvider) getCacheChannel(podName string) chan log return ch } - newCh := make(chan logEntry, maxCachedMessagesPerPod) + newCh := make(chan logEntry, maxCacheSize) gclLogsProvider.LogEntryCache[podName] = newCh return newCh } diff --git a/vendor/k8s.io/kubernetes/test/e2e/cronjob.go b/vendor/k8s.io/kubernetes/test/e2e/cronjob.go index 8afbcecb6d1b..3675ee3ae5ce 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/cronjob.go +++ b/vendor/k8s.io/kubernetes/test/e2e/cronjob.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/job" 
"k8s.io/kubernetes/pkg/kubectl" + utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) @@ -47,6 +48,7 @@ const ( var ( CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "cronjobs"} ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batchv2alpha1.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"} + removedScheduledJobsVersion = utilversion.MustParseSemantic("v1.8.0") ) var _ = framework.KubeDescribe("CronJob", func() { @@ -63,6 +65,7 @@ var _ = framework.KubeDescribe("CronJob", func() { // multiple jobs running at once It("should schedule multiple jobs concurrently", func() { + framework.SkipUnlessServerVersionLT(removedScheduledJobsVersion, f.ClientSet.Discovery()) By("Creating a cronjob") cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv2alpha1.AllowConcurrent, sleepCommand, nil) diff --git a/vendor/k8s.io/kubernetes/test/e2e/e2e.go b/vendor/k8s.io/kubernetes/test/e2e/e2e.go index ee8d7fa76b2b..123d2c25f7c3 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/e2e.go +++ b/vendor/k8s.io/kubernetes/test/e2e/e2e.go @@ -122,6 +122,11 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { framework.Failf("Failed to setup provider config: %v", err) } + switch framework.TestContext.Provider { + case "gce", "gke": + framework.LogClusterImageSources() + } + c, err := framework.LoadClientset() if err != nil { glog.Fatal("Error loading client: ", err) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go index 2de5860777a8..dfca1e4f91f9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go @@ -154,7 +154,7 @@ func GetClusterName(instancePrefix string) string { // Warning: this MUST be consistent with the CLUSTER_IP_RANGE set in // gce/config-test.sh. 
func GetClusterIpRange() string { - return "10.100.0.0/14" + return "10.64.0.0/14" } // GetE2eFirewalls returns all firewall rules we create for an e2e cluster. diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go b/vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go index 726726b397a4..19f3cc5b71b3 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go @@ -17,8 +17,11 @@ limitations under the License. package framework import ( + "encoding/json" "fmt" + "io/ioutil" "os/exec" + "path/filepath" "regexp" "strings" "time" @@ -96,3 +99,100 @@ func DeleteGCEStaticIP(name string) error { } return nil } + +// Returns master & node image string, or error +func lookupClusterImageSources() (string, string, error) { + // Given args for a gcloud compute command, run it with other args, and return the values, + // whether separated by newlines, commas or semicolons. + gcloudf := func(argv ...string) ([]string, error) { + args := []string{"compute"} + args = append(args, argv...) 
+ args = append(args, "--project", TestContext.CloudConfig.ProjectID, + "--zone", TestContext.CloudConfig.Zone) + outputBytes, err := exec.Command("gcloud", args...).CombinedOutput() + str := strings.Replace(string(outputBytes), ",", "\n", -1) + str = strings.Replace(str, ";", "\n", -1) + lines := strings.Split(str, "\n") + if err != nil { + Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err) + for _, l := range lines { + Logf(" > %s", l) + } + } + return lines, err + } + + // Given a GCE instance, look through its disks, finding one that has a sourceImage + host2image := func(instance string) (string, error) { + // gcloud compute instances describe {INSTANCE} --format="get(disks[].source)" + // gcloud compute disks describe {DISKURL} --format="get(sourceImage)" + disks, err := gcloudf("instances", "describe", instance, "--format=get(disks[].source)") + if err != nil { + return "", err + } else if len(disks) == 0 { + return "", fmt.Errorf("instance %q had no findable disks", instance) + } + // Loop over disks, looking for the boot disk + for _, disk := range disks { + lines, err := gcloudf("disks", "describe", disk, "--format=get(sourceImage)") + if err != nil { + return "", err + } else if len(lines) > 0 && lines[0] != "" { + return lines[0], nil // break, we're done + } + } + return "", fmt.Errorf("instance %q had no disk with a sourceImage", instance) + } + + // gcloud compute instance-groups list-instances {GROUPNAME} --format="get(instance)" + nodeName := "" + instGroupName := strings.Split(TestContext.CloudConfig.NodeInstanceGroup, ",")[0] + if lines, err := gcloudf("instance-groups", "list-instances", instGroupName, "--format=get(instance)"); err != nil { + return "", "", err + } else if len(lines) == 0 { + return "", "", fmt.Errorf("no instances inside instance-group %q", instGroupName) + } else { + nodeName = lines[0] + } + + nodeImg, err := host2image(nodeName) + if err != nil { + return "", "", err + } + frags := 
strings.Split(nodeImg, "/") + nodeImg = frags[len(frags)-1] + + // For GKE clusters, MasterName will not be defined; we just leave masterImg blank. + masterImg := "" + if masterName := TestContext.CloudConfig.MasterName; masterName != "" { + img, err := host2image(masterName) + if err != nil { + return "", "", err + } + frags = strings.Split(img, "/") + masterImg = frags[len(frags)-1] + } + + return masterImg, nodeImg, nil +} + +func LogClusterImageSources() { + masterImg, nodeImg, err := lookupClusterImageSources() + if err != nil { + Logf("Cluster image sources lookup failed: %v\n", err) + return + } + Logf("cluster-master-image: %s", masterImg) + Logf("cluster-node-image: %s", nodeImg) + + images := map[string]string{ + "master_os_image": masterImg, + "node_os_image": nodeImg, + } + + outputBytes, _ := json.MarshalIndent(images, "", " ") + filePath := filepath.Join(TestContext.ReportDir, "images.json") + if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil { + Logf("cluster images sources, could not write to %q: %v", filePath, err) + } +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go b/vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go index 6d6969cd8d08..f6e17ce2c8f9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go @@ -718,8 +718,7 @@ func (cont *GCEIngressController) Init() { func (cont *GCEIngressController) CreateStaticIP(name string) string { gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud) addr := &compute.Address{Name: name} - ip, err := gceCloud.ReserveGlobalAddress(addr) - if err != nil { + if err := gceCloud.ReserveGlobalAddress(addr); err != nil { if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil { if cont.isHTTPErrorCode(delErr, http.StatusNotFound) { Logf("Static ip with name %v was not allocated, nothing to delete", name) @@ -727,8 +726,14 @@ func (cont *GCEIngressController) 
CreateStaticIP(name string) string { Logf("Failed to delete static ip %v: %v", name, delErr) } } - Failf("Failed to allocated static ip %v: %v", name, err) + Failf("Failed to allocate static ip %v: %v", name, err) + } + + ip, err := gceCloud.GetGlobalAddress(name) + if err != nil { + Failf("Failed to get newly created static ip %v: %v", name, err) } + cont.staticIPName = ip.Name Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address) return ip.Address @@ -761,7 +766,7 @@ func gcloudComputeResourceList(resource, regex, project string, out interface{}) // so we only look at stdout. command := []string{ "compute", resource, "list", - fmt.Sprintf("--regexp=%q", regex), + fmt.Sprintf("--filter='name ~ \"%q\"'", regex), fmt.Sprintf("--project=%v", project), "-q", "--format=json", } @@ -883,9 +888,12 @@ func (j *IngressTestJig) GetRootCA(secretName string) (rootCA []byte) { return } -// DeleteIngress deletes the ingress resource -func (j *IngressTestJig) DeleteIngress() { - ExpectNoError(j.Client.Extensions().Ingresses(j.Ingress.Namespace).Delete(j.Ingress.Name, nil)) +// TryDeleteIngress attempts to delete the ingress resource and logs errors if they occur. 
+func (j *IngressTestJig) TryDeleteIngress() { + err := j.Client.Extensions().Ingresses(j.Ingress.Namespace).Delete(j.Ingress.Name, nil) + if err != nil { + Logf("Error while deleting the ingress %v/%v: %v", j.Ingress.Namespace, j.Ingress.Name, err) + } } // WaitForIngress waits till the ingress acquires an IP, then waits for its diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go index 950c36ea4df2..5866875ebca3 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go @@ -716,16 +716,21 @@ func createPD(zone string) (string, error) { } else if TestContext.Provider == "azure" { pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID())) azureCloud, err := GetAzureCloud() + if err != nil { return "", err } - _, diskUri, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */) + if azureCloud.BlobDiskController == nil { + return "", fmt.Errorf("BlobDiskController is nil, it's not expected.") + } + + diskUri, err := azureCloud.BlobDiskController.CreateBlobDisk(pdName, "standard_lrs", 1, false) if err != nil { return "", err } - return diskUri, nil + return diskUri, nil } else { return "", fmt.Errorf("provider does not support volume creation") } @@ -770,8 +775,11 @@ func deletePD(pdName string) error { if err != nil { return err } + if azureCloud.BlobDiskController == nil { + return fmt.Errorf("BlobDiskController is nil, it's not expected.") + } diskName := pdName[(strings.LastIndex(pdName, "/") + 1):] - err = azureCloud.DeleteVolume(diskName, pdName) + err = azureCloud.BlobDiskController.DeleteBlobDisk(diskName, false) if err != nil { Logf("failed to delete Azure volume %q: %v", pdName, err) return err diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go index 
1598ecda389b..2f2b5c78ddea 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go @@ -38,6 +38,8 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/retry" + azurecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" @@ -1330,3 +1332,29 @@ func DescribeSvc(ns string) { "describe", "svc", fmt.Sprintf("--namespace=%v", ns)) Logf(desc) } + +// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer +// setting for the supported cloud providers: GCE/GKE and Azure +func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) { + enable = func(svc *v1.Service) {} + disable = func(svc *v1.Service) {} + + switch TestContext.Provider { + case "gce", "gke": + enable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)} + } + disable = func(svc *v1.Service) { + delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType) + } + case "azure": + enable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "true"} + } + disable = func(svc *v1.Service) { + svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "false"} + } + } + + return +} diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go b/vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go index fc54166d7205..a32f1b380c13 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go +++ 
b/vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go @@ -19,6 +19,7 @@ package framework import ( "fmt" "path/filepath" + "reflect" "regexp" "sort" "strconv" @@ -32,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" utilyaml "k8s.io/apimachinery/pkg/util/yaml" @@ -147,7 +149,7 @@ func (s *StatefulSetTester) CheckMount(ss *apps.StatefulSet, mountPath string) e func (s *StatefulSetTester) ExecInStatefulPods(ss *apps.StatefulSet, cmd string) error { podList := s.GetPodList(ss) for _, statefulPod := range podList.Items { - stdout, err := RunHostCmd(statefulPod.Namespace, statefulPod.Name, cmd) + stdout, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) Logf("stdout of %v on %v: %v", cmd, statefulPod.Name, stdout) if err != nil { return err @@ -161,7 +163,7 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error { cmd := "printf $(hostname)" podList := s.GetPodList(ss) for _, statefulPod := range podList.Items { - hostname, err := RunHostCmd(statefulPod.Namespace, statefulPod.Name, cmd) + hostname, err := RunHostCmdWithRetries(statefulPod.Namespace, statefulPod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) if err != nil { return err } @@ -176,10 +178,10 @@ func (s *StatefulSetTester) CheckHostname(ss *apps.StatefulSet) error { func (s *StatefulSetTester) Saturate(ss *apps.StatefulSet) { var i int32 for i = 0; i < *(ss.Spec.Replicas); i++ { - Logf("Waiting for stateful pod at index " + fmt.Sprintf("%v", i+1) + " to enter Running") - s.WaitForRunningAndReady(i+1, ss) - Logf("Marking stateful pod at index " + fmt.Sprintf("%v", i) + " healthy") - s.SetHealthy(ss) + Logf("Waiting for stateful pod at index %v to enter Running", i) + s.WaitForRunning(i+1, i, ss) + Logf("Resuming stateful pod 
at index %v", i) + s.ResumeNextPod(ss) } } @@ -210,10 +212,12 @@ func getStatefulSetPodNameAtIndex(index int, ss *apps.StatefulSet) string { } // Scale scales ss to count replicas. -func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) error { +func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) (*apps.StatefulSet, error) { name := ss.Name ns := ss.Namespace - s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count }) + + Logf("Scaling statefulset %s to %d", name, count) + ss = s.update(ns, name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = count }) var statefulPodList *v1.PodList pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { @@ -231,9 +235,9 @@ func (s *StatefulSetTester) Scale(ss *apps.StatefulSet, count int32) error { unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", statefulPod.Name, delTs, phase, readiness)) } } - return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy) + return ss, fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, StatefulSetTimeout, unhealthy) } - return nil + return ss, nil } // UpdateReplicas updates the replicas of ss to count. @@ -244,11 +248,16 @@ func (s *StatefulSetTester) UpdateReplicas(ss *apps.StatefulSet, count int32) { // Restart scales ss to 0 and then back to its previous number of replicas. func (s *StatefulSetTester) Restart(ss *apps.StatefulSet) { oldReplicas := *(ss.Spec.Replicas) - ExpectNoError(s.Scale(ss, 0)) + ss, err := s.Scale(ss, 0) + ExpectNoError(err) + // Wait for controller to report the desired number of Pods. + // This way we know the controller has observed all Pod deletions + // before we scale it back up. 
+ s.WaitForStatusReplicas(ss, 0) s.update(ss.Namespace, ss.Name, func(ss *apps.StatefulSet) { *(ss.Spec.Replicas) = oldReplicas }) } -func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) { +func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.StatefulSet)) *apps.StatefulSet { for i := 0; i < 3; i++ { ss, err := s.c.Apps().StatefulSets(ns).Get(name, metav1.GetOptions{}) if err != nil { @@ -257,13 +266,14 @@ func (s *StatefulSetTester) update(ns, name string, update func(ss *apps.Statefu update(ss) ss, err = s.c.Apps().StatefulSets(ns).Update(ss) if err == nil { - return + return ss } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { Failf("failed to update statefulset %q: %v", name, err) } } Failf("too many retries draining statefulset %q", name) + return nil } // GetPodList gets the current Pods in ss. @@ -298,18 +308,22 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful } } -func (s *StatefulSetTester) waitForRunning(numStatefulPods int32, ss *apps.StatefulSet, shouldBeReady bool) { +// WaitForRunning waits for numPodsRunning in ss to be Running and for the first +// numPodsReady ordinals to be Ready. 
+func (s *StatefulSetTester) WaitForRunning(numPodsRunning, numPodsReady int32, ss *apps.StatefulSet) { pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout, func() (bool, error) { podList := s.GetPodList(ss) - if int32(len(podList.Items)) < numStatefulPods { - Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numStatefulPods) + s.SortStatefulPods(podList) + if int32(len(podList.Items)) < numPodsRunning { + Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPodsRunning) return false, nil } - if int32(len(podList.Items)) > numStatefulPods { - return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numStatefulPods, len(podList.Items)) + if int32(len(podList.Items)) > numPodsRunning { + return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPodsRunning, len(podList.Items)) } for _, p := range podList.Items { + shouldBeReady := getStatefulPodOrdinal(&p) < int(numPodsReady) isReady := podutil.IsPodReady(&p) desiredReadiness := shouldBeReady == isReady Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady) @@ -355,7 +369,7 @@ func (s *StatefulSetTester) WaitForStatus(set *apps.StatefulSet) *apps.StatefulS // WaitForRunningAndReady waits for numStatefulPods in ss to be Running and Ready. func (s *StatefulSetTester) WaitForRunningAndReady(numStatefulPods int32, ss *apps.StatefulSet) { - s.waitForRunning(numStatefulPods, ss, true) + s.WaitForRunning(numStatefulPods, numStatefulPods, ss) } // WaitForPodReady waits for the Pod named podName in set to exist and have a Ready condition. @@ -489,73 +503,120 @@ func (s *StatefulSetTester) WaitForPartitionedRollingUpdate(set *apps.StatefulSe // WaitForRunningAndReady waits for numStatefulPods in ss to be Running and not Ready. 
func (s *StatefulSetTester) WaitForRunningAndNotReady(numStatefulPods int32, ss *apps.StatefulSet) { - s.waitForRunning(numStatefulPods, ss, false) + s.WaitForRunning(numStatefulPods, 0, ss) +} + +var httpProbe = &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Path: "/index.html", + Port: intstr.IntOrString{IntVal: 80}, + }, + }, + PeriodSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, +} + +// SetHttpProbe sets the pod template's ReadinessProbe for Nginx StatefulSet containers. +// This probe can then be controlled with BreakHttpProbe() and RestoreHttpProbe(). +// Note that this cannot be used together with PauseNewPods(). +func (s *StatefulSetTester) SetHttpProbe(ss *apps.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].ReadinessProbe = httpProbe } -// BreakProbe breaks the readiness probe for Nginx StatefulSet containers in ss. -func (s *StatefulSetTester) BreakProbe(ss *apps.StatefulSet, probe *v1.Probe) error { - path := probe.HTTPGet.Path +// BreakHttpProbe breaks the readiness probe for Nginx StatefulSet containers in ss. +func (s *StatefulSetTester) BreakHttpProbe(ss *apps.StatefulSet) error { + path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("Path expected to be not empty: %v", path) } - cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/", path) + // Ignore 'mv' errors to make this idempotent. + cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path) return s.ExecInStatefulPods(ss, cmd) } -// BreakProbe breaks the readiness probe for Nginx StatefulSet containers in pod. -func (s *StatefulSetTester) BreakPodProbe(ss *apps.StatefulSet, pod *v1.Pod, probe *v1.Probe) error { - path := probe.HTTPGet.Path +// BreakPodHttpProbe breaks the readiness probe for Nginx StatefulSet containers in one pod. 
+func (s *StatefulSetTester) BreakPodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error { + path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("Path expected to be not empty: %v", path) } - cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/", path) - stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd) + // Ignore 'mv' errors to make this idempotent. + cmd := fmt.Sprintf("mv -v /usr/share/nginx/html%v /tmp/ || true", path) + stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) return err } -// RestoreProbe restores the readiness probe for Nginx StatefulSet containers in ss. -func (s *StatefulSetTester) RestoreProbe(ss *apps.StatefulSet, probe *v1.Probe) error { - path := probe.HTTPGet.Path +// RestoreHttpProbe restores the readiness probe for Nginx StatefulSet containers in ss. +func (s *StatefulSetTester) RestoreHttpProbe(ss *apps.StatefulSet) error { + path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("Path expected to be not empty: %v", path) } - cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/", path) + // Ignore 'mv' errors to make this idempotent. + cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path) return s.ExecInStatefulPods(ss, cmd) } -// RestoreProbe restores the readiness probe for Nginx StatefulSet containers in pod. -func (s *StatefulSetTester) RestorePodProbe(ss *apps.StatefulSet, pod *v1.Pod, probe *v1.Probe) error { - path := probe.HTTPGet.Path +// RestorePodHttpProbe restores the readiness probe for Nginx StatefulSet containers in pod. 
+func (s *StatefulSetTester) RestorePodHttpProbe(ss *apps.StatefulSet, pod *v1.Pod) error { + path := httpProbe.HTTPGet.Path if path == "" { return fmt.Errorf("Path expected to be not empty: %v", path) } - cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/", path) - stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd) + // Ignore 'mv' errors to make this idempotent. + cmd := fmt.Sprintf("mv -v /tmp%v /usr/share/nginx/html/ || true", path) + stdout, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, StatefulSetPoll, StatefulPodTimeout) Logf("stdout of %v on %v: %v", cmd, pod.Name, stdout) return err } -// SetHealthy updates the StatefulSet InitAnnotation to true in order to set a StatefulSet Pod to be Running and Ready. -func (s *StatefulSetTester) SetHealthy(ss *apps.StatefulSet) { +var pauseProbe = &v1.Probe{ + Handler: v1.Handler{ + Exec: &v1.ExecAction{Command: []string{"test", "-f", "/data/statefulset-continue"}}, + }, + PeriodSeconds: 1, + SuccessThreshold: 1, + FailureThreshold: 1, +} + +func hasPauseProbe(pod *v1.Pod) bool { + probe := pod.Spec.Containers[0].ReadinessProbe + return probe != nil && reflect.DeepEqual(probe.Exec.Command, pauseProbe.Exec.Command) +} + +// PauseNewPods adds an always-failing ReadinessProbe to the StatefulSet PodTemplate. +// This causes all newly-created Pods to stay Unready until they are manually resumed +// with ResumeNextPod(). +// Note that this cannot be used together with SetHttpProbe(). +func (s *StatefulSetTester) PauseNewPods(ss *apps.StatefulSet) { + ss.Spec.Template.Spec.Containers[0].ReadinessProbe = pauseProbe +} + +// ResumeNextPod allows the next Pod in the StatefulSet to continue by removing the ReadinessProbe +// added by PauseNewPods(), if it's still there. +// It fails the test if it finds any pods that are not in phase Running, +// or if it finds more than one paused Pod existing at the same time. +// This is a no-op if there are no paused pods. 
+func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) { podList := s.GetPodList(ss) - markedHealthyPod := "" + resumedPod := "" for _, pod := range podList.Items { if pod.Status.Phase != v1.PodRunning { - Failf("Found pod in %v cannot set health", pod.Status.Phase) + Failf("Found pod in phase %q, cannot resume", pod.Status.Phase) } - if IsStatefulSetPodInitialized(pod) { + if podutil.IsPodReady(&pod) || !hasPauseProbe(&pod) { continue } - if markedHealthyPod != "" { - Failf("Found multiple non-healthy stateful pods: %v and %v", pod.Name, markedHealthyPod) + if resumedPod != "" { + Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod) } - p, err := UpdatePodWithRetries(s.c, pod.Namespace, pod.Name, func(update *v1.Pod) { - update.Annotations[apps.StatefulSetInitAnnotation] = "true" - }) + _, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "touch /data/statefulset-continue; sync", StatefulSetPoll, StatefulPodTimeout) ExpectNoError(err) - Logf("Set annotation %v to %v on pod %v", apps.StatefulSetInitAnnotation, p.Annotations[apps.StatefulSetInitAnnotation], pod.Name) - markedHealthyPod = pod.Name + Logf("Resumed pod %v", pod.Name) + resumedPod = pod.Name } } @@ -635,12 +696,13 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { // Scale down each statefulset, then delete it completely. // Deleting a pvc without doing this will leak volumes, #25101. errList := []string{} - for _, ss := range ssList.Items { - Logf("Scaling statefulset %v to 0", ss.Name) - if err := sst.Scale(&ss, 0); err != nil { + for i := range ssList.Items { + ss := &ssList.Items[i] + var err error + if ss, err = sst.Scale(ss, 0); err != nil { errList = append(errList, fmt.Sprintf("%v", err)) } - sst.WaitForStatusReplicas(&ss, 0) + sst.WaitForStatusReplicas(ss, 0) Logf("Deleting statefulset %v", ss.Name) // Use OrphanDependents=false so it's deleted synchronously. // We already made sure the Pods are gone inside Scale(). 
@@ -698,19 +760,6 @@ func DeleteAllStatefulSets(c clientset.Interface, ns string) { } } -// IsStatefulSetPodInitialized returns true if pod's StatefulSetInitAnnotation exists and is set to true. -func IsStatefulSetPodInitialized(pod v1.Pod) bool { - initialized, ok := pod.Annotations[apps.StatefulSetInitAnnotation] - if !ok { - return false - } - inited, err := strconv.ParseBool(initialized) - if err != nil { - Failf("Couldn't parse statefulset init annotations %v", initialized) - } - return inited -} - // NewStatefulSetPVC returns a PersistentVolumeClaim named name, for testing StatefulSets. func NewStatefulSetPVC(name string) v1.PersistentVolumeClaim { return v1.PersistentVolumeClaim{ @@ -792,11 +841,6 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP } } -// SetStatefulSetInitializedAnnotation sets teh StatefulSetInitAnnotation to value. -func SetStatefulSetInitializedAnnotation(ss *apps.StatefulSet, value string) { - ss.Spec.Template.ObjectMeta.Annotations["pod.alpha.kubernetes.io/initialized"] = value -} - var statefulPodRegex = regexp.MustCompile("(.*)-([0-9]+)$") func getStatefulPodOrdinal(pod *v1.Pod) int { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go index 07ed74b2822c..eb2cfdd3ddf6 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go @@ -130,6 +130,10 @@ type NodeTestContextType struct { PrepullImages bool // KubeletConfig is the kubelet configuration the test is running against. KubeletConfig componentconfig.KubeletConfiguration + // SystemSpecName is the name of the system spec (e.g., gke) that's used in + // the node e2e test. If empty, the default one (system.DefaultSpec) is + // used. The system specs are in test/e2e_node/system/specs/. 
+ SystemSpecName string } type CloudConfig struct { @@ -240,6 +244,7 @@ func RegisterNodeFlags() { // It is hard and unnecessary to deal with the complexity inside the test suite. flag.BoolVar(&TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and fetch system log (kernel, docker, kubelet log etc.) to the report directory.") flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.") + flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.") } // ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags. diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index 7b373e9f88f5..a60746f73e92 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -353,6 +353,24 @@ func SkipIfContainerRuntimeIs(runtimes ...string) { } } +func RunIfContainerRuntimeIs(runtimes ...string) { + for _, runtime := range runtimes { + if runtime == TestContext.ContainerRuntime { + return + } + } + Skipf("Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes) +} + +func RunIfSystemSpecNameIs(names ...string) { + for _, name := range names { + if name == TestContext.SystemSpecName { + return + } + } + Skipf("Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names) +} + func ProviderIs(providers ...string) bool { for _, provider := range providers { if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) { @@ -410,6 +428,16 @@ func 
SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersio } } +func SkipUnlessServerVersionLT(v *utilversion.Version, c discovery.ServerVersionInterface) { + gte, err := ServerVersionGTE(v, c) + if err != nil { + Failf("Failed to get server version: %v", err) + } + if gte { + Skipf("Not supported for server versions starting from %q", v) + } +} + func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) { dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr) if err != nil { @@ -3736,6 +3764,24 @@ func RunHostCmdOrDie(ns, name, cmd string) string { return stdout } +// RunHostCmdWithRetries calls RunHostCmd and retries errors it thinks may be transient +// until it succeeds or the specified timeout expires. +// This can be used with idempotent commands to deflake transient Node issues. +func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) { + start := time.Now() + for { + out, err := RunHostCmd(ns, name, cmd) + if err == nil { + return out, nil + } + if elapsed := time.Since(start); elapsed > timeout { + return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err) + } + Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err) + time.Sleep(interval) + } +} + // LaunchHostExecPod launches a hostexec pod in the given namespace and waits // until it's Running func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { @@ -4886,17 +4932,17 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeo } // CoreDump SSHs to the master and all nodes and dumps their logs into dir. -// It shells out to cluster/log-dump.sh to accomplish this. +// It shells out to cluster/log-dump/log-dump.sh to accomplish this. 
func CoreDump(dir string) { if TestContext.DisableLogDump { Logf("Skipping dumping logs from cluster") return } - cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir) + cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - Logf("Error running cluster/log-dump.sh: %v", err) + Logf("Error running cluster/log-dump/log-dump.sh: %v", err) } } diff --git a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go index 70368fb18016..30f9f6375fee 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go +++ b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go @@ -11918,7 +11918,7 @@ func testFixturesPkgKubectlPluginsEnvEnvSh() (*asset, error) { return a, nil } -var _testFixturesPkgKubectlPluginsEnvPluginYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xca\x4b\xcc\x4d\xb5\x52\x48\xcd\x2b\xe3\x2a\xce\xc8\x2f\x2a\x71\x49\x2d\x4e\xb6\x52\x50\x0a\xc9\x48\x55\x28\xc8\x29\x4d\xcf\xcc\x03\x49\x15\x43\xd9\x4a\x5c\xc9\xf9\xb9\xb9\x89\x79\x29\x56\x0a\x4a\x7a\xfa\xa9\x79\x65\x7a\xc5\x19\x4a\x5c\x80\x00\x00\x00\xff\xff\x5d\x7d\x46\x67\x42\x00\x00\x00") +var _testFixturesPkgKubectlPluginsEnvPluginYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8e\x31\xca\xc3\x30\x0c\x46\x77\x9d\xe2\x43\xfb\x9f\x9f\xb6\x9b\xe7\x1e\xa1\x74\x17\x89\x12\x1b\x6c\xa7\x54\x76\xce\x5f\x9c\xa4\x85\x52\xc8\x26\x78\x4f\x4f\xca\x92\xd4\x41\xf3\x42\xe6\xe7\x67\xb9\xaa\xf5\x0e\x7c\xf3\x8a\x47\xac\x53\xc8\x0d\xd9\x3e\x33\xf5\x73\x4a\x92\x07\x07\xee\xfe\x35\x2f\x9d\x79\xa6\x31\xca\x64\x0e\x04\xfc\x61\xab\x71\x51\x2b\x27\x26\x00\x18\xde\xc1\x60\x08\x06\x41\xd3\xb1\xc2\x2f\xfd\x7c\xa4\xef\x70\xfd\xd0\x6f\xf7\xcb\x4f\xe1\x72\x54\xf8\xc0\xf1\x2e\xb1\xb6\x95\x41\x47\xa9\xb1\x30\xbd\x02\x00\x00\xff\xff\x0d\x64\xd9\xa6\x02\x01\x00\x00") func 
testFixturesPkgKubectlPluginsEnvPluginYamlBytes() ([]byte, error) { return bindataRead( diff --git a/vendor/k8s.io/kubernetes/test/e2e/ingress.go b/vendor/k8s.io/kubernetes/test/e2e/ingress.go index 2b3914a60b11..a28bf38282ba 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/ingress.go +++ b/vendor/k8s.io/kubernetes/test/e2e/ingress.go @@ -87,7 +87,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() { return } By("Deleting ingress") - jig.DeleteIngress() + jig.TryDeleteIngress() By("Cleaning up cloud resources") framework.CleanupGCEIngressController(gceController) @@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() { return } By("Deleting ingress") - jig.DeleteIngress() + jig.TryDeleteIngress() }) It("should conform to Ingress spec", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/kubectl.go b/vendor/k8s.io/kubernetes/test/e2e/kubectl.go index 9e0f6f6a4c1d..731606adaa5a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/kubectl.go +++ b/vendor/k8s.io/kubernetes/test/e2e/kubectl.go @@ -191,44 +191,6 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() { // Customized Wait / ForEach wrapper for this test. These demonstrate the - framework.KubeDescribe("Kubectl run ScheduledJob", func() { - var nsFlag string - var sjName string - - BeforeEach(func() { - nsFlag = fmt.Sprintf("--namespace=%v", ns) - sjName = "e2e-test-echo-scheduledjob" - }) - - AfterEach(func() { - framework.RunKubectlOrDie("delete", "cronjobs", sjName, nsFlag) - }) - - It("should create a ScheduledJob", func() { - framework.SkipIfMissingResource(f.ClientPool, ScheduledJobGroupVersionResource, f.Namespace.Name) - - schedule := "*/5 * * * ?" 
- framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1", - "--schedule="+schedule, "--image="+busyboxImage, nsFlag) - By("verifying the ScheduledJob " + sjName + " was created") - sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName, metav1.GetOptions{}) - if err != nil { - framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err) - } - if sj.Spec.Schedule != schedule { - framework.Failf("Failed creating a ScheduledJob with correct schedule %s, but got %s", schedule, sj.Spec.Schedule) - } - containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers - if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage { - framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers) - } - restartPolicy := sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy - if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { - framework.Failf("Failed creating a ScheduledJob with correct restart policy %s, but got %s", v1.RestartPolicyOnFailure, restartPolicy) - } - }) - }) - framework.KubeDescribe("Kubectl run CronJob", func() { var nsFlag string var cjName string diff --git a/vendor/k8s.io/kubernetes/test/e2e/service.go b/vendor/k8s.io/kubernetes/test/e2e/service.go index 6b3c67698837..b9cc0c62b38c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/service.go +++ b/vendor/k8s.io/kubernetes/test/e2e/service.go @@ -1242,29 +1242,31 @@ var _ = framework.KubeDescribe("Services", func() { framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP) }) - It("should be able to create an internal type load balancer on Azure [Slow]", func() { - framework.SkipUnlessProviderIs("azure") + It("should be able to create an internal type load balancer [Slow]", func() { + framework.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := framework.LoadBalancerCreateTimeoutDefault 
pollInterval := framework.Poll * 10 - serviceAnnotationLoadBalancerInternal := "service.beta.kubernetes.io/azure-load-balancer-internal" namespace := f.Namespace.Name serviceName := "lb-internal" jig := framework.NewServiceTestJig(cs, serviceName) + By("creating pod to be part of service " + serviceName) + jig.RunOrFail(namespace, nil) + + enableILB, disableILB := framework.EnableAndDisableInternalLB() + isInternalEndpoint := func(lbIngress *v1.LoadBalancerIngress) bool { ingressEndpoint := framework.GetIngressPoint(lbIngress) // Needs update for providers using hostname as endpoint. return strings.HasPrefix(ingressEndpoint, "10.") } - By("creating a service with type LoadBalancer and LoadBalancerInternal annotation set to true") + By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer - svc.ObjectMeta.Annotations = map[string]string{ - serviceAnnotationLoadBalancerInternal: "true", - } + enableILB(svc) }) svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, createTimeout) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) @@ -1272,9 +1274,9 @@ var _ = framework.KubeDescribe("Services", func() { // should have an internal IP. 
Expect(isInternalEndpoint(lbIngress)).To(BeTrue()) - By("switiching to external type LoadBalancer") + By("switching to external type LoadBalancer") svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { - svc.ObjectMeta.Annotations[serviceAnnotationLoadBalancerInternal] = "false" + disableILB(svc) }) framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName) if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { @@ -1291,26 +1293,33 @@ var _ = framework.KubeDescribe("Services", func() { jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) Expect(isInternalEndpoint(lbIngress)).To(BeFalse()) - By("switiching back to interal type LoadBalancer, with static IP specified.") - internalStaticIP := "10.240.11.11" - svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { - svc.Spec.LoadBalancerIP = internalStaticIP - svc.ObjectMeta.Annotations[serviceAnnotationLoadBalancerInternal] = "true" - }) - framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) - if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { - svc, err := jig.Client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) - if err != nil { - return false, err + // GCE cannot test a specific IP because the test may not own it. This cloud specific condition + // will be removed when GCP supports similar functionality. 
+ if framework.ProviderIs("azure") { + By("switching back to interal type LoadBalancer, with static IP specified.") + internalStaticIP := "10.240.11.11" + svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { + svc.Spec.LoadBalancerIP = internalStaticIP + enableILB(svc) + }) + framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName) + if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) { + svc, err := jig.Client.Core().Services(namespace).Get(serviceName, metav1.GetOptions{}) + if err != nil { + return false, err + } + lbIngress = &svc.Status.LoadBalancer.Ingress[0] + return isInternalEndpoint(lbIngress), nil + }); pollErr != nil { + framework.Failf("Loadbalancer IP not changed to internal.") } - lbIngress = &svc.Status.LoadBalancer.Ingress[0] - return isInternalEndpoint(lbIngress), nil - }); pollErr != nil { - framework.Failf("Loadbalancer IP not changed to internal.") + // should have the given static internal IP. + jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) + Expect(framework.GetIngressPoint(lbIngress)).To(Equal(internalStaticIP)) } - // should have the given static internal IP. 
- jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - Expect(framework.GetIngressPoint(lbIngress)).To(Equal(internalStaticIP)) + + By("switching to ClusterIP type to destroy loadbalancer") + jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout) }) }) diff --git a/vendor/k8s.io/kubernetes/test/e2e/statefulset.go b/vendor/k8s.io/kubernetes/test/e2e/statefulset.go index 36c370cdeccc..0763dd8e92ee 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/statefulset.go +++ b/vendor/k8s.io/kubernetes/test/e2e/statefulset.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/labels" klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/api/v1" @@ -93,13 +92,12 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("should provide basic identity", func() { By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 3 - framework.SetStatefulSetInitializedAnnotation(ss, "false") + sst := framework.NewStatefulSetTester(c) + sst.PauseNewPods(ss) _, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) - sst := framework.NewStatefulSetTester(c) - By("Saturating stateful set " + ss.Name) sst.Saturate(ss) @@ -118,7 +116,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { By("Restarting statefulset " + ss.Name) sst.Restart(ss) - sst.Saturate(ss) + sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) By("Verifying statefulset mounted data directory is usable") framework.ExpectNoError(sst.CheckMount(ss, "/data")) @@ -131,7 +129,8 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("should adopt matching orphans and release non-matching pods", func() { By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 1 - framework.SetStatefulSetInitializedAnnotation(ss, "false") + sst := framework.NewStatefulSetTester(c) + 
sst.PauseNewPods(ss) // Replace ss with the one returned from Create() so it has the UID. // Save Kind since it won't be populated in the returned ss. @@ -140,8 +139,6 @@ var _ = framework.KubeDescribe("StatefulSet", func() { Expect(err).NotTo(HaveOccurred()) ss.Kind = kind - sst := framework.NewStatefulSetTester(c) - By("Saturating stateful set " + ss.Name) sst.Saturate(ss) pods := sst.GetPodList(ss) @@ -215,20 +212,19 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("should not deadlock when a pod's predecessor fails", func() { By("Creating statefulset " + ssName + " in namespace " + ns) *(ss.Spec.Replicas) = 2 - framework.SetStatefulSetInitializedAnnotation(ss, "false") + sst := framework.NewStatefulSetTester(c) + sst.PauseNewPods(ss) _, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) - sst := framework.NewStatefulSetTester(c) - - sst.WaitForRunningAndReady(1, ss) + sst.WaitForRunning(1, 0, ss) - By("Marking stateful pod at index 0 as healthy.") - sst.SetHealthy(ss) + By("Resuming stateful pod at index 0.") + sst.ResumeNextPod(ss) By("Waiting for stateful pod at index 1 to enter running.") - sst.WaitForRunningAndReady(2, ss) + sst.WaitForRunning(2, 1, ss) // Now we have 1 healthy and 1 unhealthy stateful pod. 
Deleting the healthy stateful pod should *not* // create a new stateful pod till the remaining stateful pod becomes healthy, which won't happen till @@ -238,25 +234,22 @@ var _ = framework.KubeDescribe("StatefulSet", func() { sst.DeleteStatefulPodAtIndex(0, ss) By("Confirming stateful pod at index 0 is recreated.") - sst.WaitForRunningAndReady(2, ss) + sst.WaitForRunning(2, 1, ss) - By("Deleting unhealthy stateful pod at index 1.") - sst.DeleteStatefulPodAtIndex(1, ss) + By("Resuming stateful pod at index 1.") + sst.ResumeNextPod(ss) By("Confirming all stateful pods in statefulset are created.") - sst.Saturate(ss) + sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) }) It("should perform rolling updates and roll backs of template modifications", func() { By("Creating a new StatefulSet") - testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{ - Path: "/index.html", - Port: intstr.IntOrString{IntVal: 80}}}} ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) - ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe + sst := framework.NewStatefulSetTester(c) + sst.SetHttpProbe(ss) ss, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) - sst := framework.NewStatefulSetTester(c) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision @@ -273,7 +266,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { currentRevision)) } sst.SortStatefulPods(pods) - sst.BreakPodProbe(ss, &pods.Items[1], testProbe) + sst.BreakPodHttpProbe(ss, &pods.Items[1]) Expect(err).NotTo(HaveOccurred()) ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) newImage := newNginxImage @@ -295,7 +288,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { By("Updating Pods in reverse ordinal order") pods = sst.GetPodList(ss) sst.SortStatefulPods(pods) - sst.RestorePodProbe(ss, &pods.Items[1], testProbe) + 
sst.RestorePodHttpProbe(ss, &pods.Items[1]) ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) ss, pods = sst.WaitForRollingUpdate(ss) Expect(ss.Status.CurrentRevision).To(Equal(updateRevision), @@ -320,7 +313,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { } By("Rolling back to a previous revision") - sst.BreakPodProbe(ss, &pods.Items[1], testProbe) + sst.BreakPodHttpProbe(ss, &pods.Items[1]) Expect(err).NotTo(HaveOccurred()) ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name) priorRevision := currentRevision @@ -339,7 +332,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() { By("Rolling back update in reverse ordinal order") pods = sst.GetPodList(ss) sst.SortStatefulPods(pods) - sst.RestorePodProbe(ss, &pods.Items[1], testProbe) + sst.RestorePodHttpProbe(ss, &pods.Items[1]) ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name) ss, pods = sst.WaitForRollingUpdate(ss) Expect(ss.Status.CurrentRevision).To(Equal(priorRevision), @@ -367,11 +360,9 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("should perform canary updates and phased rolling updates of template modifications", func() { By("Creating a new StaefulSet") - testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{ - Path: "/index.html", - Port: intstr.IntOrString{IntVal: 80}}}} ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) - ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe + sst := framework.NewStatefulSetTester(c) + sst.SetHttpProbe(ss) ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ Type: apps.RollingUpdateStatefulSetStrategyType, RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy { @@ -384,7 +375,6 @@ var _ = framework.KubeDescribe("StatefulSet", func() { } ss, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) - sst := framework.NewStatefulSetTester(c) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) currentRevision, 
updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision @@ -579,17 +569,14 @@ var _ = framework.KubeDescribe("StatefulSet", func() { It("should implement legacy replacement when the update strategy is OnDelete", func() { By("Creating a new StatefulSet") - testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{ - Path: "/index.html", - Port: intstr.IntOrString{IntVal: 80}}}} ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels) - ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe + sst := framework.NewStatefulSetTester(c) + sst.SetHttpProbe(ss) ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{ Type: apps.OnDeleteStatefulSetStrategyType, } ss, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) - sst := framework.NewStatefulSetTester(c) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) ss = sst.WaitForStatus(ss) currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision @@ -669,27 +656,24 @@ var _ = framework.KubeDescribe("StatefulSet", func() { Expect(err).NotTo(HaveOccurred()) By("Creating stateful set " + ssName + " in namespace " + ns) - testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{ - Path: "/index.html", - Port: intstr.IntOrString{IntVal: 80}}}} ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) - ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe + sst := framework.NewStatefulSetTester(c) + sst.SetHttpProbe(ss) ss, err = c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) - sst := framework.NewStatefulSetTester(c) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) By("Confirming that stateful set scale up will halt with unhealthy stateful pod") - sst.BreakProbe(ss, testProbe) + sst.BreakHttpProbe(ss) sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss) 
sst.WaitForStatusReadyReplicas(ss, 0) sst.UpdateReplicas(ss, 3) sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true) By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) - sst.RestoreProbe(ss, testProbe) + sst.RestoreHttpProbe(ss) sst.WaitForRunningAndReady(3, ss) By("Verifying that stateful set " + ssName + " was scaled up in order") @@ -713,14 +697,14 @@ var _ = framework.KubeDescribe("StatefulSet", func() { }) Expect(err).NotTo(HaveOccurred()) - sst.BreakProbe(ss, testProbe) + sst.BreakHttpProbe(ss) sst.WaitForStatusReadyReplicas(ss, 0) sst.WaitForRunningAndNotReady(3, ss) sst.UpdateReplicas(ss, 0) sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true) By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) - sst.RestoreProbe(ss, testProbe) + sst.RestoreHttpProbe(ss) sst.Scale(ss, 0) By("Verifying that stateful set " + ssName + " was scaled down in reverse order") @@ -743,41 +727,38 @@ var _ = framework.KubeDescribe("StatefulSet", func() { psLabels := klabels.Set(labels) By("Creating stateful set " + ssName + " in namespace " + ns) - testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{ - Path: "/index.html", - Port: intstr.IntOrString{IntVal: 80}}}} ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels) ss.Spec.PodManagementPolicy = apps.ParallelPodManagement - ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe + sst := framework.NewStatefulSetTester(c) + sst.SetHttpProbe(ss) ss, err := c.Apps().StatefulSets(ns).Create(ss) Expect(err).NotTo(HaveOccurred()) By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns) - sst := framework.NewStatefulSetTester(c) sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss) By("Confirming that stateful set scale up will not halt with unhealthy stateful pod") - sst.BreakProbe(ss, testProbe) + 
sst.BreakHttpProbe(ss) sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss) sst.WaitForStatusReadyReplicas(ss, 0) sst.UpdateReplicas(ss, 3) sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false) By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns) - sst.RestoreProbe(ss, testProbe) + sst.RestoreHttpProbe(ss) sst.WaitForRunningAndReady(3, ss) By("Scale down will not halt with unhealthy stateful pod") - sst.BreakProbe(ss, testProbe) + sst.BreakHttpProbe(ss) sst.WaitForStatusReadyReplicas(ss, 0) sst.WaitForRunningAndNotReady(3, ss) sst.UpdateReplicas(ss, 0) sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false) By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns) - sst.RestoreProbe(ss, testProbe) + sst.RestoreHttpProbe(ss) sst.Scale(ss, 0) - sst.WaitForStatusReadyReplicas(ss, 0) + sst.WaitForStatusReplicas(ss, 0) }) It("Should recreate evicted statefulset", func() { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/OWNERS b/vendor/k8s.io/kubernetes/test/e2e/storage/OWNERS index 2de902ff8499..67e6d2ee25a2 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/OWNERS +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/OWNERS @@ -1,13 +1,13 @@ approvers: - saad-aali - rootfs -- gnufied +- gnufied - jingxu97 - jsafrane reviewers: - saad-aali - rootfs -- gnufied +- gnufied - jingxu97 - jsafrane - msau42 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/ing.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/ing.yaml index 0d4828d1799d..969a8f488d50 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/ing.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/ing.yaml @@ -22,4 +22,3 @@ spec: backend: serviceName: echoheadersx servicePort: 80 - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/svc.yaml 
b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/svc.yaml index 3a53c8d21659..081c77f7417d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/svc.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/http/svc.yaml @@ -29,4 +29,3 @@ spec: name: http selector: app: echoheaders - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/ing.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/ing.yaml index 342c9dcafbf6..a75d42db2e6a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/ing.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/ing.yaml @@ -13,4 +13,3 @@ spec: backend: serviceName: echoheaders-https servicePort: 80 - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/secret.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/secret.yaml index a6e2e1b80f16..26cc00aa674c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/secret.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/ingress/static-ip/secret.yaml @@ -6,4 +6,3 @@ kind: Secret metadata: name: tls-secret type: Opaque - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml index 22625ad14579..3793e8c72c5b 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/serviceloadbalancer/nginxsvc.yaml @@ -11,4 +11,3 @@ spec: name: http selector: app: nginx - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml index 8fa9c324a349..c2bfa9ad7093 100644 --- 
a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/mysql-galera/service.yaml @@ -15,4 +15,3 @@ spec: clusterIP: None selector: app: mysql - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml index 376c7beefb55..8562c5cb3c87 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml @@ -15,4 +15,3 @@ spec: clusterIP: None selector: app: redis - diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/service.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/service.yaml index 8659a2af6052..2db8bdca14b2 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/service.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/zookeeper/service.yaml @@ -17,4 +17,3 @@ spec: clusterIP: None selector: app: zk - diff --git a/vendor/k8s.io/kubernetes/test/e2e/upgrades/ingress.go b/vendor/k8s.io/kubernetes/test/e2e/upgrades/ingress.go index 1a6ace7e0d04..7ca9e3b7d130 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/upgrades/ingress.go +++ b/vendor/k8s.io/kubernetes/test/e2e/upgrades/ingress.go @@ -101,7 +101,7 @@ func (t *IngressUpgradeTest) Teardown(f *framework.Framework) { } if t.jig.Ingress != nil { By("Deleting ingress") - t.jig.DeleteIngress() + t.jig.TryDeleteIngress() } else { By("No ingress created, no cleanup necessary") } diff --git a/vendor/k8s.io/kubernetes/test/e2e/upgrades/statefulset.go b/vendor/k8s.io/kubernetes/test/e2e/upgrades/statefulset.go index 7fad63d57590..2fc9f335e3a8 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/upgrades/statefulset.go +++ 
b/vendor/k8s.io/kubernetes/test/e2e/upgrades/statefulset.go @@ -61,12 +61,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) { t.set = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels) t.service = framework.CreateStatefulSetService(ssName, labels) *(t.set.Spec.Replicas) = 3 - framework.SetStatefulSetInitializedAnnotation(t.set, "false") + t.tester = framework.NewStatefulSetTester(f.ClientSet) + t.tester.PauseNewPods(t.set) By("Creating service " + headlessSvcName + " in namespace " + ns) _, err := f.ClientSet.Core().Services(ns).Create(t.service) Expect(err).NotTo(HaveOccurred()) - t.tester = framework.NewStatefulSetTester(f.ClientSet) By("Creating statefulset " + ssName + " in namespace " + ns) *(t.set.Spec.Replicas) = 3 @@ -109,5 +109,5 @@ func (t *StatefulSetUpgradeTest) verify() { func (t *StatefulSetUpgradeTest) restart() { By("Restarting statefulset " + t.set.Name) t.tester.Restart(t.set) - t.tester.Saturate(t.set) + t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set) } diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go b/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go index 750f8447a9c1..d51bc28999d9 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_conformance.go @@ -63,13 +63,17 @@ const ( // timestamp is used as an unique id of current test. var timestamp = getTimestamp() -// getConformanceImageRepo returns conformance image full repo name. -func getConformanceImageRepo() string { - return fmt.Sprintf("%s/node-test-%s:%s", conformanceRegistry, conformanceArch, timestamp) +// getConformanceTestImageName returns name of the conformance test image given the system spec name. 
+func getConformanceTestImageName(systemSpecName string) string { + if systemSpecName == "" { + return fmt.Sprintf("%s/node-test-%s:%s", conformanceRegistry, conformanceArch, timestamp) + } else { + return fmt.Sprintf("%s/node-test-%s-%s:%s", conformanceRegistry, systemSpecName, conformanceArch, timestamp) + } } // buildConformanceTest builds node conformance test image tarball into binDir. -func buildConformanceTest(binDir string) error { +func buildConformanceTest(binDir, systemSpecName string) error { // Get node conformance directory. conformancePath, err := getConformanceDirectory() if err != nil { @@ -79,13 +83,14 @@ func buildConformanceTest(binDir string) error { cmd := exec.Command("make", "-C", conformancePath, "BIN_DIR="+binDir, "REGISTRY="+conformanceRegistry, "ARCH="+conformanceArch, - "VERSION="+timestamp) + "VERSION="+timestamp, + "SYSTEM_SPEC_NAME="+systemSpecName) if output, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("failed to build node conformance docker image: command - %q, error - %v, output - %q", commandToString(cmd), err, output) } // Save docker image into tar file. 
- cmd = exec.Command("docker", "save", "-o", filepath.Join(binDir, conformanceTarfile), getConformanceImageRepo()) + cmd = exec.Command("docker", "save", "-o", filepath.Join(binDir, conformanceTarfile), getConformanceTestImageName(systemSpecName)) if output, err := cmd.CombinedOutput(); err != nil { return fmt.Errorf("failed to save node conformance docker image into tar file: command - %q, error - %v, output - %q", commandToString(cmd), err, output) @@ -94,7 +99,7 @@ func buildConformanceTest(binDir string) error { } // SetupTestPackage sets up the test package with binaries k8s required for node conformance test -func (c *ConformanceRemote) SetupTestPackage(tardir string) error { +func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) error { // Build the executables if err := builder.BuildGo(); err != nil { return fmt.Errorf("failed to build the depedencies: %v", err) @@ -107,8 +112,8 @@ func (c *ConformanceRemote) SetupTestPackage(tardir string) error { } // Build node conformance tarball. - if err := buildConformanceTest(buildOutputDir); err != nil { - return fmt.Errorf("failed to build node conformance test %v", err) + if err := buildConformanceTest(buildOutputDir, systemSpecName); err != nil { + return fmt.Errorf("failed to build node conformance test: %v", err) } // Copy files @@ -253,7 +258,7 @@ func stopKubelet(host, workspace string) error { } // RunTest runs test on the node. -func (c *ConformanceRemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, _ string, timeout time.Duration) (string, error) { +func (c *ConformanceRemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, _, systemSpecName string, timeout time.Duration) (string, error) { // Install the cni plugins and add a basic CNI configuration. 
if err := setupCNI(host, workspace); err != nil { return "", err @@ -288,7 +293,7 @@ func (c *ConformanceRemote) RunTest(host, workspace, results, junitFilePrefix, t glog.V(2).Infof("Starting tests on %q", host) podManifestPath := getPodManifestPath(workspace) cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'", - timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceImageRepo()) + timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName)) testOutput, err := SSH(host, "sh", "-c", cmd) if err != nil { return testOutput, err diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go b/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go index 3068cd5e3250..7485137d4d16 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/remote/node_e2e.go @@ -29,7 +29,10 @@ import ( "k8s.io/kubernetes/test/e2e_node/builder" ) -const localCOSMounterPath = "cluster/gce/gci/mounter/mounter" +const ( + localCOSMounterPath = "cluster/gce/gci/mounter/mounter" + systemSpecPath = "test/e2e_node/system/specs" +) // NodeE2ERemote contains the specific functions in the node e2e test suite. 
type NodeE2ERemote struct{} @@ -40,7 +43,7 @@ func InitNodeE2ERemote() TestSuite { } // SetupTestPackage sets up the test package with binaries k8s required for node e2e tests -func (n *NodeE2ERemote) SetupTestPackage(tardir string) error { +func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error { // Build the executables if err := builder.BuildGo(); err != nil { return fmt.Errorf("failed to build the depedencies: %v", err) @@ -49,7 +52,12 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir string) error { // Make sure we can find the newly built binaries buildOutputDir, err := builder.GetK8sBuildOutputDir() if err != nil { - return fmt.Errorf("failed to locate kubernetes build output directory %v", err) + return fmt.Errorf("failed to locate kubernetes build output directory: %v", err) + } + + rootDir, err := builder.GetK8sRootDir() + if err != nil { + return fmt.Errorf("failed to locate kubernetes root directory: %v", err) } // Copy binaries @@ -65,6 +73,18 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir string) error { } } + if systemSpecName != "" { + // Copy system spec file + source := filepath.Join(rootDir, systemSpecPath, systemSpecName+".yaml") + if _, err := os.Stat(source); err != nil { + return fmt.Errorf("failed to locate system spec %q: %v", source, err) + } + out, err := exec.Command("cp", source, tardir).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to copy system spec %q: %v, output: %q", source, err, out) + } + } + // Include the GCI/COS mounter artifacts in the deployed tarball err = tarAddCOSMounter(tardir) if err != nil { @@ -163,7 +183,7 @@ func updateOSSpecificKubeletFlags(args, host, workspace string) (string, error) } // RunTest runs test on the node. 
-func (n *NodeE2ERemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs string, timeout time.Duration) (string, error) { +func (n *NodeE2ERemote) RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) { // Install the cni plugins and add a basic CNI configuration. if err := setupCNI(host, workspace); err != nil { return "", err @@ -182,12 +202,17 @@ func (n *NodeE2ERemote) RunTest(host, workspace, results, junitFilePrefix, testA return "", err } + systemSpecFile := "" + if systemSpecName != "" { + systemSpecFile = systemSpecName + ".yaml" + } + // Run the tests glog.V(2).Infof("Starting tests on %q", host) cmd := getSSHCommand(" && ", fmt.Sprintf("cd %s", workspace), - fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s %s", - timeout.Seconds(), ginkgoArgs, host, results, junitFilePrefix, testArgs), + fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s %s", + timeout.Seconds(), ginkgoArgs, systemSpecName, systemSpecFile, host, results, junitFilePrefix, testArgs), ) return SSH(host, "sh", "-c", cmd) } diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go b/vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go index 9b6d0b9b7bcf..a5406cc779cd 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/remote/remote.go @@ -34,7 +34,7 @@ var resultsDir = flag.String("results-dir", "/tmp/", "Directory to scp test resu const archiveName = "e2e_node_test.tar.gz" -func CreateTestArchive(suite TestSuite) (string, error) { +func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) { glog.V(2).Infof("Building archive...") tardir, err := ioutil.TempDir("", "node-e2e-archive") if 
err != nil { @@ -43,7 +43,7 @@ func CreateTestArchive(suite TestSuite) (string, error) { defer os.RemoveAll(tardir) // Call the suite function to setup the test package. - err = suite.SetupTestPackage(tardir) + err = suite.SetupTestPackage(tardir, systemSpecName) if err != nil { return "", fmt.Errorf("failed to setup test package %q: %v", tardir, err) } @@ -63,7 +63,7 @@ func CreateTestArchive(suite TestSuite) (string, error) { // Returns the command output, whether the exit was ok, and any errors // TODO(random-liu): junitFilePrefix is not prefix actually, the file name is junit-junitFilePrefix.xml. Change the variable name. -func RunRemote(suite TestSuite, archive string, host string, cleanup bool, junitFilePrefix string, testArgs string, ginkgoArgs string) (string, bool, error) { +func RunRemote(suite TestSuite, archive string, host string, cleanup bool, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) { // Create the temp staging directory glog.V(2).Infof("Staging test binaries on %q", host) workspace := fmt.Sprintf("/tmp/node-e2e-%s", getTimestamp()) @@ -108,7 +108,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, junit } glog.V(2).Infof("Running test on %q", host) - output, err := suite.RunTest(host, workspace, resultDir, junitFilePrefix, testArgs, ginkgoArgs, *testTimeoutSeconds) + output, err := suite.RunTest(host, workspace, resultDir, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, *testTimeoutSeconds) aggErrs := []error{} // Do not log the output here, let the caller deal with the test output. diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go b/vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go index 52b80a54222d..984d3797c55a 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/remote/types.go @@ -29,7 +29,7 @@ type TestSuite interface { // * create a tarball with the directory. 
// * deploy the tarball to the testing host. // * untar the tarball to the testing workspace on the testing host. - SetupTestPackage(path string) error + SetupTestPackage(path, systemSpecName string) error // RunTest runs test on the node in the given workspace and returns test output // and test error if there is any. // * host is the target node to run the test. @@ -40,6 +40,8 @@ type TestSuite interface { // * junitFilePrefix is the prefix of output junit file. // * testArgs is the arguments passed to test. // * ginkgoArgs is the arguments passed to ginkgo. + // * systemSpecName is the name of the system spec used for validating the + // image on which the test runs. // * timeout is the test timeout. - RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs string, timeout time.Duration) (string, error) + RunTest(host, workspace, results, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) } diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go b/vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go index 3371f5335150..0d89f83aaaae 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/runner/local/run_local.go @@ -18,6 +18,7 @@ package main import ( "flag" + "fmt" "os" "os/exec" "path/filepath" @@ -31,6 +32,11 @@ import ( var buildDependencies = flag.Bool("build-dependencies", true, "If true, build all dependencies.") var ginkgoFlags = flag.String("ginkgo-flags", "", "Space-separated list of arguments to pass to Ginkgo test runner.") var testFlags = flag.String("test-flags", "", "Space-separated list of arguments to pass to node e2e test.") +var systemSpecName = flag.String("system-spec-name", "", "The name of the system spec used for validating the image in the node conformance test. The specs are at test/e2e_node/system/specs/. 
If unspecified, the default built-in spec (system.DefaultSpec) will be used.") + +const ( + systemSpecPath = "test/e2e_node/system/specs" +) func main() { flag.Parse() @@ -50,7 +56,17 @@ func main() { glog.Infof("Got build output dir: %v", outputDir) ginkgo := filepath.Join(outputDir, "ginkgo") test := filepath.Join(outputDir, "e2e_node.test") - runCommand(ginkgo, *ginkgoFlags, test, "--", *testFlags) + + if *systemSpecName == "" { + runCommand(ginkgo, *ginkgoFlags, test, "--", *testFlags) + return + } + rootDir, err := builder.GetK8sRootDir() + if err != nil { + glog.Fatalf("Failed to get k8s root directory: %v", err) + } + systemSpecFile := filepath.Join(rootDir, systemSpecPath, *systemSpecName+".yaml") + runCommand(ginkgo, *ginkgoFlags, test, "--", fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s", *systemSpecName, systemSpecFile), *testFlags) return } diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go b/vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go index ddc4a261407e..f869994cf8af 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/runner/remote/run_remote.go @@ -58,6 +58,7 @@ var buildOnly = flag.Bool("build-only", false, "If true, build e2e_node_test.tar var instanceMetadata = flag.String("instance-metadata", "", "key/value metadata for instances separated by '=' or '<', 'k=v' means the key is 'k' and the value is 'v'; 'k=1.0)" or "bar (>=2.0)" is required. type PackageSpec struct { // Name is the name of the package to be checked. - Name string + Name string `json:"name,omitempty"` // VersionRange represents a range of versions that the package must // satisfy. Note that the version requirement will not be enforced if // the version range is empty. For example, @@ -81,9 +84,11 @@ type PackageSpec struct { // - ">1.0 <2.0" would match between both ranges, so "1.1.1" and "1.8.7" // but not "1.0.0" or "2.0.0". 
// - "<2.0.0 || >=3.0.0" would match "1.0.0" and "3.0.0" but not "2.0.0". - VersionRange string + VersionRange string `json:"versionRange,omitempty"` // Description explains the reason behind this package requirements. - Description string + // + // TODO(yguo0905): Print the description where necessary. + Description string `json:"description,omitempty"` } // PackageSpecOverride defines the overrides on the PackageSpec for an OS @@ -91,31 +96,31 @@ type PackageSpec struct { type PackageSpecOverride struct { // OSDistro identifies to which OS distro this override applies. // Must be "ubuntu", "cos" or "coreos". - OSDistro string + OSDistro string `json:"osDistro,omitempty"` // Subtractions is a list of package names that are excluded from the // package spec. - Subtractions []PackageSpec + Subtractions []PackageSpec `json:"subtractions,omitempty"` // Additions is a list of additional package requirements included the // package spec. - Additions []PackageSpec + Additions []PackageSpec `json:"additions,omitempty"` } // SysSpec defines the requirement of supported system. Currently, it only contains // spec for OS, Kernel and Cgroups. type SysSpec struct { // OS is the operating system of the SysSpec. - OS string + OS string `json:"os,omitempty"` // KernelConfig defines the spec for kernel. - KernelSpec KernelSpec + KernelSpec KernelSpec `json:"kernelSpec,omitempty"` // Cgroups is the required cgroups. - Cgroups []string + Cgroups []string `json:"cgroups,omitempty"` // RuntimeSpec defines the spec for runtime. - RuntimeSpec RuntimeSpec + RuntimeSpec RuntimeSpec `json:"runtimeSpec,omitempty"` // PackageSpec defines the required packages and their versions. - PackageSpecs []PackageSpec + PackageSpecs []PackageSpec `json:"packageSpecs,omitempty"` // PackageSpec defines the overrides of the required packages and their // versions for an OS distro. 
- PackageSpecOverrides []PackageSpecOverride + PackageSpecOverrides []PackageSpecOverride `json:"packageSpecOverrides,omitempty"` } // DefaultSysSpec is the default SysSpec. diff --git a/vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go b/vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go index 8f6d9c1c41ec..7819da7b4a68 100644 --- a/vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go +++ b/vendor/k8s.io/kubernetes/test/e2e_node/system/validators.go @@ -49,8 +49,8 @@ func Validate(spec SysSpec, validators []Validator) (error, error) { return errors.NewAggregate(warns), errors.NewAggregate(errs) } -// ValidateDefault uses all default validators to validate the system and writes to stdout. -func ValidateDefault(runtime string) (error, error) { +// ValidateSpec uses all default validators to validate the system and writes to stdout. +func ValidateSpec(spec SysSpec, runtime string) (error, error) { // OS-level validators. var osValidators = []Validator{ &OSValidator{Reporter: DefaultReporter}, @@ -68,5 +68,5 @@ func ValidateDefault(runtime string) (error, error) { case "docker": validators = append(validators, dockerValidators...) 
} - return Validate(DefaultSysSpec, validators) + return Validate(spec, validators) } diff --git a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/admin/daemon.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/admin/daemon.yaml index c5cd14a5921e..09f2c13122e4 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/admin/daemon.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/admin/daemon.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: prometheus-node-exporter diff --git a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/deployment.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/deployment.yaml index 98e614ceb2eb..c20f0d800e1c 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/deployment.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/deployment.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: extensions/v1beta1 kind: Deployment metadata: name: nginx-deployment diff --git a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/ingress.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/ingress.yaml index 163c1d5b9d02..a81a4cc7be48 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/ingress.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/ingress.yaml @@ -6,4 +6,3 @@ spec: backend: serviceName: testsvc servicePort: 80 - diff --git a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/job.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/job.yaml index ece4512a8acf..964de82ec5af 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/job.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/job.yaml @@ -12,4 +12,3 @@ spec: image: perl command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] restartPolicy: Never - diff --git 
a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_0_25lps.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_0_25lps.yaml index 93516706c050..84cb8e760265 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_0_25lps.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_0_25lps.yaml @@ -27,4 +27,3 @@ spec: - -c - 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done' - diff --git a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_10lps.yaml b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_10lps.yaml index 6e3c1ca92965..2637f1332cfd 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_10lps.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/doc-yaml/user-guide/logging-demo/synthetic_10lps.yaml @@ -27,4 +27,3 @@ spec: - -c - 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep 0.1; i=$[$i+1]; done' - diff --git a/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/env/plugin.yaml b/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/env/plugin.yaml index b01d44a843cc..00287ad4a286 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/env/plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/env/plugin.yaml @@ -1,7 +1,7 @@ name: env shortDesc: "The plugin envs plugin" command: "./env.sh" -flags: +flags: - name: "test1" desc: "This is a flag 1" - name: "test2" diff --git a/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/tree/plugin.yaml b/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/tree/plugin.yaml index 889e7a6a75eb..51d928b565ed 100644 --- a/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/tree/plugin.yaml +++ 
b/vendor/k8s.io/kubernetes/test/fixtures/pkg/kubectl/plugins/tree/plugin.yaml @@ -10,4 +10,3 @@ tree: - name: "child3" shortDesc: "The third child of a tree" command: echo child three - diff --git a/vendor/k8s.io/kubernetes/test/integration/auth/node_test.go b/vendor/k8s.io/kubernetes/test/integration/auth/node_test.go index d8d9d895fac3..41350625891e 100644 --- a/vendor/k8s.io/kubernetes/test/integration/auth/node_test.go +++ b/vendor/k8s.io/kubernetes/test/integration/auth/node_test.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/policy" "k8s.io/kubernetes/pkg/auth/nodeidentifier" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" @@ -205,6 +206,30 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2 := func(client clientset.Interface) error { return client.Core().Nodes().Delete("node2", nil) } + createNode2NormalPodEviction := func(client clientset.Interface) error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2normalpod", + Namespace: "ns", + }, + }) + } + createNode2MirrorPodEviction := func(client clientset.Interface) error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2mirrorpod", + Namespace: "ns", + }, + }) + } nodeanonClient := clientsetForUser("unknown/system:nodes", clientConfig) node1Client := clientsetForUser("system:node:node1/system:nodes", clientConfig) @@ -218,7 +243,9 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, getPV(nodeanonClient)) expectForbidden(t, createNode2NormalPod(nodeanonClient)) 
expectForbidden(t, createNode2MirrorPod(nodeanonClient)) + expectForbidden(t, deleteNode2NormalPod(nodeanonClient)) expectForbidden(t, deleteNode2MirrorPod(nodeanonClient)) + expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient)) expectForbidden(t, createNode2(nodeanonClient)) expectForbidden(t, updateNode2Status(nodeanonClient)) expectForbidden(t, deleteNode2(nodeanonClient)) @@ -230,7 +257,8 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, getPV(node1Client)) expectForbidden(t, createNode2NormalPod(nodeanonClient)) expectForbidden(t, createNode2MirrorPod(node1Client)) - expectForbidden(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, createNode2MirrorPodEviction(node1Client)) expectForbidden(t, createNode2(node1Client)) expectForbidden(t, updateNode2Status(node1Client)) expectForbidden(t, deleteNode2(node1Client)) @@ -245,6 +273,8 @@ func TestNodeAuthorizer(t *testing.T) { // mirror pod and self node lifecycle is allowed expectAllowed(t, createNode2MirrorPod(node2Client)) expectAllowed(t, deleteNode2MirrorPod(node2Client)) + expectAllowed(t, createNode2MirrorPod(node2Client)) + expectAllowed(t, createNode2MirrorPodEviction(node2Client)) expectAllowed(t, createNode2(node2Client)) expectAllowed(t, updateNode2Status(node2Client)) expectAllowed(t, deleteNode2(node2Client)) @@ -261,8 +291,10 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, createNode2NormalPod(nodeanonClient)) expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient)) expectForbidden(t, deleteNode2NormalPod(nodeanonClient)) + expectForbidden(t, createNode2NormalPodEviction(nodeanonClient)) expectForbidden(t, createNode2MirrorPod(nodeanonClient)) expectForbidden(t, deleteNode2MirrorPod(nodeanonClient)) + expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient)) expectForbidden(t, getSecret(node1Client)) expectForbidden(t, getPVSecret(node1Client)) @@ -272,8 +304,10 @@ func TestNodeAuthorizer(t 
*testing.T) { expectForbidden(t, createNode2NormalPod(node1Client)) expectForbidden(t, updateNode2NormalPodStatus(node1Client)) expectForbidden(t, deleteNode2NormalPod(node1Client)) + expectForbidden(t, createNode2NormalPodEviction(node1Client)) expectForbidden(t, createNode2MirrorPod(node1Client)) - expectForbidden(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, createNode2MirrorPodEviction(node1Client)) // node2 can get referenced objects now expectAllowed(t, getSecret(node2Client)) @@ -286,6 +320,11 @@ func TestNodeAuthorizer(t *testing.T) { expectAllowed(t, deleteNode2NormalPod(node2Client)) expectAllowed(t, createNode2MirrorPod(node2Client)) expectAllowed(t, deleteNode2MirrorPod(node2Client)) + // recreate as an admin to test eviction + expectAllowed(t, createNode2NormalPod(superuserClient)) + expectAllowed(t, createNode2MirrorPod(superuserClient)) + expectAllowed(t, createNode2NormalPodEviction(node2Client)) + expectAllowed(t, createNode2MirrorPodEviction(node2Client)) } func expectForbidden(t *testing.T, err error) { @@ -295,6 +334,13 @@ func expectForbidden(t *testing.T, err error) { } } +func expectNotFound(t *testing.T, err error) { + if !errors.IsNotFound(err) { + _, file, line, _ := runtime.Caller(1) + t.Errorf("%s:%d: Expected notfound error, got %v", filepath.Base(file), line, err) + } +} + func expectAllowed(t *testing.T, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) diff --git a/vendor/k8s.io/kubernetes/vendor/k8s.io/kube-gen b/vendor/k8s.io/kubernetes/vendor/k8s.io/kube-gen deleted file mode 120000 index c57d0e8c3893..000000000000 --- a/vendor/k8s.io/kubernetes/vendor/k8s.io/kube-gen +++ /dev/null @@ -1 +0,0 @@ -../../staging/src/k8s.io/kube-gen \ No newline at end of file From e4b8c3965037fd67a3bbfe04f3a3e3b60b967414 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 21 Sep 2017 15:36:07 -0400 Subject: [PATCH 08/27] UPSTREAM: 50036: Bring volume operation 
metrics --- .../controller/volume/persistentvolume/BUILD | 1 + .../volume/persistentvolume/pv_controller.go | 8 +- .../k8s.io/kubernetes/pkg/volume/util/BUILD | 2 + .../kubernetes/pkg/volume/util/metrics.go | 63 +++++++++++++++ .../nestedpendingoperations.go | 6 +- .../nestedpendingoperations_test.go | 60 +++++++------- .../operationexecutor/operation_executor.go | 41 ++++++---- .../operation_executor_test.go | 24 +++--- .../operationexecutor/operation_generator.go | 81 +++++++++++-------- 9 files changed, 192 insertions(+), 94 deletions(-) create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD index 0d6da56ec02c..7dcf18a296cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD @@ -36,6 +36,7 @@ go_library( "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go index 97ef17f6b7a0..39d25bca0c92 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go @@ -39,6 +39,7 @@ import ( "k8s.io/kubernetes/pkg/util/goroutinemap" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" vol "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "github.com/golang/glog" ) @@ -1215,7 +1216,10 @@ func (ctrl 
*PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu return false, fmt.Errorf("Failed to create deleter for volume %q: %v", volume.Name, err) } - if err = deleter.Delete(); err != nil { + opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete") + err = deleter.Delete() + opComplete(err) + if err != nil { // Deleter failed return false, err } @@ -1325,7 +1329,9 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa return } + opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision") volume, err = provisioner.Provision() + opComplete(err) if err != nil { strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index c2ff2f56693b..ddaade57f51b 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -17,6 +17,7 @@ go_library( "doc.go", "fs.go", "io_util.go", + "metrics.go", "util.go", ], tags = ["automanaged"], @@ -27,6 +28,7 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/util/mount:go_default_library", "//vendor/github.com/golang/glog:go_default_library", + "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go new file mode 100644 index 000000000000..087bbfff4169 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go @@ -0,0 +1,63 @@ +/* 
+Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var storageOperationMetric = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "storage_operation_duration_seconds", + Help: "Storage operation duration", + }, + []string{"volume_plugin", "operation_name"}, +) + +var storageOperationErrorMetric = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "storage_operation_errors_total", + Help: "Storage operation errors", + }, + []string{"volume_plugin", "operation_name"}, +) + +func init() { + registerMetrics() +} + +func registerMetrics() { + prometheus.MustRegister(storageOperationMetric) + prometheus.MustRegister(storageOperationErrorMetric) +} + +// OperationCompleteHook returns a hook to call when an operation is completed +func OperationCompleteHook(plugin, operationName string) func(error) { + requestTime := time.Now() + opComplete := func(err error) { + timeTaken := time.Since(requestTime).Seconds() + // Create metric with operation name and plugin name + if err != nil { + storageOperationErrorMetric.WithLabelValues(plugin, operationName).Inc() + } else { + storageOperationMetric.WithLabelValues(plugin, operationName).Observe(timeTaken) + } + } + return opComplete +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go 
b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go index 64d70900a37b..a55ea70deca2 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -55,7 +55,7 @@ type NestedPendingOperations interface { // concatenation of volumeName and podName is removed from the list of // executing operations allowing a new operation to be started with the // volumeName without error. - Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error + Run(volumeName v1.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error, operationCompleteFunc func(error)) error // Wait blocks until all operations are completed. This is typically // necessary during tests - the test should wait until all operations finish @@ -94,7 +94,8 @@ type operation struct { func (grm *nestedPendingOperations) Run( volumeName v1.UniqueVolumeName, podName types.UniquePodName, - operationFunc func() error) error { + operationFunc func() error, + operationCompleteFunc func(error)) error { grm.lock.Lock() defer grm.lock.Unlock() opExists, previousOpIndex := grm.isOperationExists(volumeName, podName) @@ -132,6 +133,7 @@ func (grm *nestedPendingOperations) Run( defer k8sRuntime.HandleCrash() // Handle completion of and error, if any, from operationFunc() defer grm.operationComplete(volumeName, podName, &err) + defer operationCompleteFunc(err) // Handle panic, if any, from operationFunc() defer k8sRuntime.RecoverFromPanic(&err) return operationFunc() diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go index ce079407a63b..19e0d62fe480 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations_test.go @@ -50,7 +50,7 @@ func Test_NewGoRoutineMap_Positive_SingleOp(t *testing.T) { operation := func() error { return nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation) + err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) // Assert if err != nil { @@ -66,8 +66,8 @@ func Test_NewGoRoutineMap_Positive_TwoOps(t *testing.T) { operation := func() error { return nil } // Act - err1 := grm.Run(volume1Name, "" /* operationSubName */, operation) - err2 := grm.Run(volume2Name, "" /* operationSubName */, operation) + err1 := grm.Run(volume1Name, "" /* operationSubName */, operation, func(error) {}) + err2 := grm.Run(volume2Name, "" /* operationSubName */, operation, func(error) {}) // Assert if err1 != nil { @@ -88,8 +88,8 @@ func Test_NewGoRoutineMap_Positive_TwoSubOps(t *testing.T) { operation := func() error { return nil } // Act - err1 := grm.Run(volumeName, operation1PodName, operation) - err2 := grm.Run(volumeName, operation2PodName, operation) + err1 := grm.Run(volumeName, operation1PodName, operation, func(error) {}) + err2 := grm.Run(volumeName, operation2PodName, operation, func(error) {}) // Assert if err1 != nil { @@ -108,7 +108,7 @@ func Test_NewGoRoutineMap_Positive_SingleOpWithExpBackoff(t *testing.T) { operation := func() error { return nil } // Act - err := grm.Run(volumeName, "" /* operationSubName */, operation) + err := grm.Run(volumeName, "" /* operationSubName */, operation, func(error) {}) // Assert if err != nil { @@ -122,7 +122,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := 
grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -133,7 +133,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletes(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2) + err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -154,7 +154,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateCallbackFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -165,7 +165,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstCompletesWithExpBackoff(t * err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2) + err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. 
Will retry.", err) return false, nil @@ -185,7 +185,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { grm := NewNestedPendingOperations(false /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -195,7 +195,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanics(t *testing.T) { err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2) + err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -215,7 +215,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes grm := NewNestedPendingOperations(true /* exponentialBackOffOnError */) volumeName := v1.UniqueVolumeName("volume-name") operation1 := generatePanicFunc() - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } @@ -225,7 +225,7 @@ func Test_NewGoRoutineMap_Positive_SecondOpAfterFirstPanicsWithExpBackoff(t *tes err2 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeLong), // Longer duration to accommodate for backoff func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation2) + err := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. 
Will retry.", err) return false, nil @@ -246,14 +246,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) // Assert if err2 == nil { @@ -271,14 +271,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes2(t *testing.T operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1) + err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2) + err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) // Assert if err2 == nil { @@ -296,14 +296,14 @@ func Test_NewGoRoutineMap_Negative_SecondSubOpBeforeFirstCompletes(t *testing.T) operationPodName := types.UniquePodName("operation-podname") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, operationPodName, operation1) + err1 := grm.Run(volumeName, operationPodName, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, operationPodName, operation2) + err2 := grm.Run(volumeName, operationPodName, operation2, func(error) {}) // Assert if err2 == nil { @@ -320,14 +320,14 @@ func Test_NewGoRoutineMap_Negative_SecondOpBeforeFirstCompletesWithExpBackoff(t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err1) } operation2 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) // Assert if err2 == nil { @@ -344,7 +344,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -352,7 +352,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) // Assert if err2 == nil { @@ -367,7 +367,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletes(t *testing.T) { err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3) + err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -388,7 +388,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err1 := grm.Run(volumeName, "" /* operationSubName */, operation1) + err1 := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err1 != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err1) } @@ -396,7 +396,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t operation3 := generateNoopFunc() // Act - err2 := grm.Run(volumeName, "" /* operationSubName */, operation2) + err2 := grm.Run(volumeName, "" /* operationSubName */, operation2, func(error) {}) // Assert if err2 == nil { @@ -411,7 +411,7 @@ func Test_NewGoRoutineMap_Positive_ThirdOpAfterFirstCompletesWithExpBackoff(t *t err3 := retryWithExponentialBackOff( time.Duration(initialOperationWaitTimeShort), func() (bool, error) { - err := grm.Run(volumeName, "" /* operationSubName */, operation3) + err := grm.Run(volumeName, "" /* operationSubName */, operation3, func(error) {}) if err != nil { t.Logf("Warning: NewGoRoutine failed with %v. Will retry.", err) return false, nil @@ -471,7 +471,7 @@ func Test_NewGoRoutineMap_Positive_Wait(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1) + err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err != nil { t.Fatalf("NewGoRoutine failed. Expected: Actual: <%v>", err) } @@ -500,7 +500,7 @@ func Test_NewGoRoutineMap_Positive_WaitWithExpBackoff(t *testing.T) { volumeName := v1.UniqueVolumeName("volume-name") operation1DoneCh := make(chan interface{}, 0 /* bufferSize */) operation1 := generateWaitFunc(operation1DoneCh) - err := grm.Run(volumeName, "" /* operationSubName */, operation1) + err := grm.Run(volumeName, "" /* operationSubName */, operation1, func(error) {}) if err != nil { t.Fatalf("NewGoRoutine failed. 
Expected: Actual: <%v>", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go index 0c1569095f50..da95adbba975 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" "k8s.io/kubernetes/pkg/volume/util/volumehelper" @@ -535,29 +536,32 @@ func (oe *operationExecutor) IsOperationPending(volumeName v1.UniqueVolumeName, func (oe *operationExecutor) AttachVolume( volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - attachFunc, err := + attachFunc, plugin, err := oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld) if err != nil { return err } + opCompleteFunc := util.OperationCompleteHook(plugin, "volume_attach") return oe.pendingOperations.Run( - volumeToAttach.VolumeName, "" /* podName */, attachFunc) + volumeToAttach.VolumeName, "" /* podName */, attachFunc, opCompleteFunc) } func (oe *operationExecutor) DetachVolume( volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - detachFunc, err := + detachFunc, plugin, err := oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld) if err != nil { return err } + opCompleteFunc := util.OperationCompleteHook(plugin, "volume_detach") return oe.pendingOperations.Run( - volumeToDetach.VolumeName, "" /* podName */, detachFunc) + volumeToDetach.VolumeName, "" /* podName */, detachFunc, opCompleteFunc) } + func (oe 
*operationExecutor) VerifyVolumesAreAttached( attachedVolumes map[types.NodeName][]AttachedVolume, actualStateOfWorld ActualStateOfWorldAttacherUpdater) { @@ -630,9 +634,11 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( if err != nil { glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) } + + opCompleteFunc := util.OperationCompleteHook(pluginName, "verify_volumes_are_attached") // Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin uniquePluginName := v1.UniqueVolumeName(pluginName) - err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, bulkVerifyVolumeFunc) + err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, bulkVerifyVolumeFunc, opCompleteFunc) if err != nil { glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) } @@ -648,8 +654,10 @@ func (oe *operationExecutor) VerifyVolumesAreAttachedPerNode( if err != nil { return err } + + opCompleteFunc := util.OperationCompleteHook("", "verify_volumes_are_attached_per_node") // Give an empty UniqueVolumeName so that this operation could be executed concurrently. 
- return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, volumesAreAttachedFunc) + return oe.pendingOperations.Run("" /* volumeName */, "" /* podName */, volumesAreAttachedFunc, opCompleteFunc) } func (oe *operationExecutor) MountVolume( @@ -657,7 +665,7 @@ func (oe *operationExecutor) MountVolume( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error { - mountFunc, err := oe.operationGenerator.GenerateMountVolumeFunc( + mountFunc, plugin, err := oe.operationGenerator.GenerateMountVolumeFunc( waitForAttachTimeout, volumeToMount, actualStateOfWorld, isRemount) if err != nil { return err @@ -671,15 +679,17 @@ func (oe *operationExecutor) MountVolume( podName = volumehelper.GetUniquePodName(volumeToMount.Pod) } + // TODO mount_device + opCompleteFunc := util.OperationCompleteHook(plugin, "volume_mount") return oe.pendingOperations.Run( - volumeToMount.VolumeName, podName, mountFunc) + volumeToMount.VolumeName, podName, mountFunc, opCompleteFunc) } func (oe *operationExecutor) UnmountVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - unmountFunc, err := + unmountFunc, plugin, err := oe.operationGenerator.GenerateUnmountVolumeFunc(volumeToUnmount, actualStateOfWorld) if err != nil { return err @@ -689,36 +699,39 @@ func (oe *operationExecutor) UnmountVolume( // same volume in parallel podName := volumetypes.UniquePodName(volumeToUnmount.PodUID) + opCompleteFunc := util.OperationCompleteHook(plugin, "volume_unmount") return oe.pendingOperations.Run( - volumeToUnmount.VolumeName, podName, unmountFunc) + volumeToUnmount.VolumeName, podName, unmountFunc, opCompleteFunc) } func (oe *operationExecutor) UnmountDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - unmountDeviceFunc, err := + unmountDeviceFunc, plugin, err := oe.operationGenerator.GenerateUnmountDeviceFunc(deviceToDetach, 
actualStateOfWorld, mounter) if err != nil { return err } + opCompleteFunc := util.OperationCompleteHook(plugin, "unmount_device") return oe.pendingOperations.Run( - deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc) + deviceToDetach.VolumeName, "" /* podName */, unmountDeviceFunc, opCompleteFunc) } func (oe *operationExecutor) VerifyControllerAttachedVolume( volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { - verifyControllerAttachedVolumeFunc, err := + verifyControllerAttachedVolumeFunc, plugin, err := oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld) if err != nil { return err } + opCompleteFunc := util.OperationCompleteHook(plugin, "verify_controller_attached_volume") return oe.pendingOperations.Run( - volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc) + volumeToMount.VolumeName, "" /* podName */, verifyControllerAttachedVolumeFunc, opCompleteFunc) } // TODO: this is a workaround for the unmount device issue caused by gci mounter. 
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go index b312b29d451b..941b7bf88c45 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -239,29 +239,29 @@ func newFakeOperationGenerator(ch chan interface{}, quit chan interface{}) Opera } } -func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } -func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } -func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } -func 
(fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { return func() error { @@ -269,17 +269,17 @@ func (fopg *fakeOperationGenerator) GenerateVolumesAreAttachedFunc(attachedVolum return nil }, nil } -func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } -func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { +func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { return func() error { startOperationAndBlock(fopg.ch, fopg.quit) return nil - }, nil + }, "", nil } func (fopg *fakeOperationGenerator) GenerateBulkVolumeVerifyFunc( diff --git 
a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go index 9e81eed67b66..b97ae9630f00 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go @@ -73,25 +73,25 @@ func NewOperationGenerator(kubeClient clientset.Interface, // OperationGenerator interface that extracts out the functions from operation_executor to make it dependency injectable type OperationGenerator interface { // Generates the MountVolume function needed to perform the mount of a volume plugin - GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, error) + GenerateMountVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater, isRemount bool) (func() error, string, error) // Generates the UnmountVolume function needed to perform the unmount of a volume plugin - GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) + GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) // Generates the AttachVolume function needed to perform attach of a volume plugin - GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) + GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) // Generates the DetachVolume function needed to perform the detach of a volume plugin - GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, 
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) + GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) // Generates the VolumesAreAttached function needed to verify if volume plugins are attached GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) // Generates the UnMountDevice function needed to perform the unmount of a device - GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, error) + GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) (func() error, string, error) // Generates the function needed to check if the attach_detach controller has attached the volume plugin - GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) + GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) // GetVolumePluginMgr returns volume plugin manager GetVolumePluginMgr() *volume.VolumePluginMgr @@ -245,17 +245,17 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( func (og *operationGenerator) GenerateAttachVolumeFunc( volumeToAttach VolumeToAttach, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, 
volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) + return nil, "", volumeToAttach.GenerateErrorDetailed("AttachVolume.FindAttachablePluginBySpec failed", err) } volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - return nil, volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) + return nil, attachableVolumePlugin.GetPluginName(), volumeToAttach.GenerateErrorDetailed("AttachVolume.NewAttacher failed", newAttacherErr) } return func() error { @@ -283,7 +283,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( } return nil - }, nil + }, attachableVolumePlugin.GetPluginName(), nil } func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { @@ -293,9 +293,10 @@ func (og *operationGenerator) GetVolumePluginMgr() *volume.VolumePluginMgr { func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach AttachedVolume, verifySafeToDetach bool, - actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { var volumeName string var attachableVolumePlugin volume.AttachableVolumePlugin + var pluginName string var err error if volumeToDetach.VolumeSpec != nil { @@ -303,31 +304,35 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return nil, "", volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } volumeName, err = attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec) if err != nil { - return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err) + return nil, 
attachableVolumePlugin.GetPluginName(), volumeToDetach.GenerateErrorDetailed("DetachVolume.GetVolumeName failed", err) } } else { - var pluginName string // Get attacher plugin and the volumeName by splitting the volume unique name in case // there's no VolumeSpec: this happens only on attach/detach controller crash recovery // when a pod has been deleted during the controller downtime pluginName, volumeName, err = volumehelper.SplitUniqueName(volumeToDetach.VolumeName) if err != nil { - return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) + return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } attachableVolumePlugin, err = og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil { - return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) + return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.FindAttachablePluginBySpec failed", err) } } + + if pluginName == "" { + pluginName = attachableVolumePlugin.GetPluginName() + } + volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) + return nil, pluginName, volumeToDetach.GenerateErrorDetailed("DetachVolume.NewDetacher failed", err) } return func() error { @@ -352,24 +357,24 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( volumeToDetach.VolumeName, volumeToDetach.NodeName) return nil - }, nil + }, pluginName, nil } func (og *operationGenerator) GenerateMountVolumeFunc( waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, - isRemount bool) (func() error, error) { + isRemount bool) (func() error, string, error) { // Get mounter plugin volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) if err != nil || volumePlugin == nil { - return 
nil, volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) + return nil, "", volumeToMount.GenerateErrorDetailed("MountVolume.FindPluginBySpec failed", err) } affinityErr := checkNodeAffinity(og, volumeToMount, volumePlugin) if affinityErr != nil { - return nil, affinityErr + return nil, volumePlugin.GetPluginName(), affinityErr } volumeMounter, newMounterErr := volumePlugin.NewMounter( @@ -379,13 +384,13 @@ func (og *operationGenerator) GenerateMountVolumeFunc( if newMounterErr != nil { eventErr, detailedErr := volumeToMount.GenerateError("MountVolume.NewMounter initialization failed", newMounterErr) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FailedMountVolume, eventErr.Error()) - return nil, detailedErr + return nil, volumePlugin.GetPluginName(), detailedErr } mountCheckError := checkMountOptionSupport(og, volumeToMount, volumePlugin) if mountCheckError != nil { - return nil, mountCheckError + return nil, volumePlugin.GetPluginName(), mountCheckError } // Get attacher, if possible @@ -489,23 +494,23 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } return nil - }, nil + }, volumePlugin.GetPluginName(), nil } func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount MountedVolume, - actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, string, error) { // Get mountable plugin volumePlugin, err := og.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName) if err != nil || volumePlugin == nil { - return nil, volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) + return nil, "", volumeToUnmount.GenerateErrorDetailed("UnmountVolume.FindPluginByName failed", err) } volumeUnmounter, newUnmounterErr := volumePlugin.NewUnmounter( volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID) if newUnmounterErr != nil { - return nil, 
volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) + return nil, volumePlugin.GetPluginName(), volumeToUnmount.GenerateErrorDetailed("UnmountVolume.NewUnmounter failed", newUnmounterErr) } return func() error { @@ -535,28 +540,28 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( } return nil - }, nil + }, volumePlugin.GetPluginName(), nil } func (og *operationGenerator) GenerateUnmountDeviceFunc( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, - mounter mount.Interface) (func() error, error) { + mounter mount.Interface) (func() error, string, error) { // Get attacher plugin attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginBySpec(deviceToDetach.VolumeSpec) if err != nil || attachableVolumePlugin == nil { - return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) + return nil, "", deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindAttachablePluginBySpec failed", err) } volumeDetacher, err := attachableVolumePlugin.NewDetacher() if err != nil { - return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) + return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDetacher failed", err) } volumeAttacher, err := attachableVolumePlugin.NewAttacher() if err != nil { - return nil, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) + return nil, attachableVolumePlugin.GetPluginName(), deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewAttacher failed", err) } return func() error { @@ -616,13 +621,19 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( } return nil - }, nil + }, attachableVolumePlugin.GetPluginName(), nil } func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( volumeToMount VolumeToMount, nodeName types.NodeName, - actualStateOfWorld 
ActualStateOfWorldAttacherUpdater) (func() error, error) { + actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, string, error) { + volumePlugin, err := + og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec) + if err != nil || volumePlugin == nil { + return nil, "", volumeToMount.GenerateErrorDetailed("VerifyControllerAttachedVolume.FindPluginBySpec failed", err) + } + return func() error { if !volumeToMount.PluginIsAttachable { // If the volume does not implement the attacher interface, it is @@ -678,7 +689,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( // Volume not attached, return error. Caller will log and retry. return volumeToMount.GenerateErrorDetailed("Volume not attached according to node status", nil) - }, nil + }, volumePlugin.GetPluginName(), nil } func (og *operationGenerator) verifyVolumeIsSafeToDetach( From 5cc233fdc39ec2fd504eb3d683d1931d19c4fdb8 Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Thu, 21 Sep 2017 13:38:49 -0400 Subject: [PATCH 09/27] UPSTREAM: 52675: Fix FC WaitForAttach not mounting a volume Signed-off-by: Huamin Chen --- vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go index feded0c2723c..162431933c4c 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc_util.go @@ -167,16 +167,16 @@ func (util *FCUtil) AttachDisk(b fcDiskMounter) (string, error) { } // mount it globalPDPath := util.MakeGlobalPDName(*b.fcDisk) + if err := os.MkdirAll(globalPDPath, 0750); err != nil { + return devicePath, fmt.Errorf("fc: failed to mkdir %s, error", globalPDPath) + } + noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath) if !noMnt { glog.Infof("fc: %s already mounted", globalPDPath) return devicePath, nil } - if err := os.MkdirAll(globalPDPath, 0750); err 
!= nil { - return devicePath, fmt.Errorf("fc: failed to mkdir %s, error", globalPDPath) - } - err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) if err != nil { return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) From a1189528c32b004eaeed99cd763d091a25e52ef3 Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Thu, 21 Sep 2017 13:40:42 -0400 Subject: [PATCH 10/27] UPSTREAM: 52687: Refactoring and improvements for iSCSI and FC storage plugins Signed-off-by: Huamin Chen --- .../kubernetes/pkg/volume/fc/disk_manager.go | 44 ------------------- vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go | 8 +--- .../kubernetes/pkg/volume/iscsi/attacher.go | 5 +++ .../pkg/volume/iscsi/disk_manager.go | 33 -------------- .../kubernetes/pkg/volume/iscsi/iscsi.go | 8 +--- .../kubernetes/pkg/volume/iscsi/iscsi_util.go | 3 ++ 6 files changed, 10 insertions(+), 91 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index ab5774554e36..77ea372daea4 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -67,47 +67,3 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou return nil } - -// utility to tear down a disk based filesystem -func diskTearDown(manager diskManager, c fcDiskUnmounter, volPath string, mounter mount.Interface) error { - noMnt, err := mounter.IsLikelyNotMountPoint(volPath) - if err != nil { - glog.Errorf("cannot validate mountpoint %s", volPath) - return err - } - if noMnt { - return os.Remove(volPath) - } - - refs, err := mount.GetMountRefs(mounter, volPath) - if err != nil { - glog.Errorf("failed to get reference count %s", volPath) - return err - } - if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to unmount %s", volPath) - return err - } - // If len(refs) is 1, then 
all bind mounts have been removed, and the - // remaining reference is the global mount. It is safe to detach. - if len(refs) == 1 { - mntPath := refs[0] - if err := manager.DetachDisk(c, mntPath); err != nil { - glog.Errorf("failed to detach disk from %s", mntPath) - return err - } - } - - noMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if mntErr != nil { - glog.Errorf("isMountpoint check failed: %v", mntErr) - return err - } - if noMnt { - if err := os.Remove(volPath); err != nil { - return err - } - } - return nil - -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index 2d98773a99ea..0c5c4a4cb656 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -231,13 +231,7 @@ func (c *fcDiskUnmounter) TearDown() error { } func (c *fcDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := util.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - return diskTearDown(c.manager, *c, dir, c.mounter) + return util.UnmountPath(dir, c.mounter) } func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go index 7ec17614a65f..4b4525e79e45 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/attacher.go @@ -148,6 +148,11 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) } + glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) + err = os.RemoveAll(deviceMountPath) + if err != nil { + return fmt.Errorf("iscsi: 
failed to delete the directory: %s\nError: %v", deviceMountPath, err) + } glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index 697efa241d85..458d972a0d1e 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -90,36 +90,3 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter return nil } - -// utility to tear down a disk based filesystem -func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mounter mount.Interface) error { - notMnt, err := mounter.IsLikelyNotMountPoint(volPath) - if err != nil { - glog.Errorf("cannot validate mountpoint %s", volPath) - return err - } - if notMnt { - return os.Remove(volPath) - } - _, err = mount.GetMountRefs(mounter, volPath) - if err != nil { - glog.Errorf("failed to get reference count %s", volPath) - return err - } - if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to unmount %s", volPath) - return err - } - notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if notMnt { - if err := os.Remove(volPath); err != nil { - return err - } - } - return nil - -} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go index d1d450507638..ad5d5f74b087 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi.go @@ -263,13 +263,7 @@ func (c *iscsiDiskUnmounter) TearDown() error { } func (c *iscsiDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := ioutil.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if 
!pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - return diskTearDown(c.manager, *c, dir, c.mounter) + return ioutil.UnmountPath(dir, c.mounter) } func portalMounter(portal string) string { diff --git a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go index 75fad95bb203..dd11ad2f1028 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/iscsi/iscsi_util.go @@ -272,6 +272,9 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) } + if lastErr != nil { + glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) + } //Make sure we use a valid devicepath to find mpio device. devicePath = devicePaths[0] From 6f9eacbf5559157efcf63c4b808d55518e202adf Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Thu, 21 Sep 2017 13:42:06 -0400 Subject: [PATCH 11/27] UPSTREAM: 52691: FC plugin: Return target wwn + lun at GetVolumeName() Signed-off-by: Huamin Chen --- vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go index 0c5c4a4cb656..9d1befd93eff 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/fc/fc.go @@ -63,7 +63,7 @@ func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) { } // TargetWWNs are the FibreChannel target worldwide names - return fmt.Sprintf("%v", volumeSource.TargetWWNs), nil + return fmt.Sprintf("%v:%v", volumeSource.TargetWWNs, *volumeSource.Lun), nil } func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool { From 
16e2fa1f18360f3e1d7a0c9080023831a41b83b0 Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Fri, 15 Sep 2017 17:07:17 -0500 Subject: [PATCH 12/27] UPSTREAM: 51796: Fix pod and node names switched around in error message. --- .../k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go index e2135d34a0e9..106e0a3dd306 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/daemoncontroller.go @@ -748,7 +748,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e var daemonPodsRunning []*v1.Pod for _, pod := range daemonPods { if pod.Status.Phase == v1.PodFailed { - msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, node.Name, pod.Name) + msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name) glog.V(2).Infof(msg) // Emit an event so that it's discoverable to users. 
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg) From 1f7b121b29537b55c820b1af50e4f9b8c4507e6f Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Sat, 16 Sep 2017 22:24:10 -0400 Subject: [PATCH 13/27] UPSTREAM: 52112: Allow watch cache disablement per type Backport the change that allows a global default watch cache size as well as being able to disable an individual watch cache item --- .../cmd/kube-apiserver/app/server.go | 13 ++- .../cmd/federation-apiserver/app/server.go | 14 ++- .../federation/registry/cluster/etcd/etcd.go | 2 - .../storage/storage.go | 2 - .../storage/storage.go | 2 - .../controllerrevision/storage/storage.go | 2 - .../apps/statefulset/storage/storage.go | 2 - .../storage/storage.go | 2 - .../registry/batch/cronjob/storage/storage.go | 2 - .../pkg/registry/batch/job/storage/storage.go | 2 - .../pkg/registry/cachesize/cachesize.go | 99 +++---------------- .../certificates/storage/storage.go | 2 - .../core/configmap/storage/storage.go | 2 - .../registry/core/endpoint/storage/storage.go | 2 - .../registry/core/event/storage/storage.go | 2 - .../core/limitrange/storage/storage.go | 2 - .../core/namespace/storage/storage.go | 2 - .../pkg/registry/core/node/storage/storage.go | 2 - .../core/persistentvolume/storage/storage.go | 2 - .../persistentvolumeclaim/storage/storage.go | 2 - .../pkg/registry/core/pod/storage/storage.go | 2 - .../core/podtemplate/storage/storage.go | 2 - .../replicationcontroller/storage/storage.go | 2 - .../core/resourcequota/storage/storage.go | 2 - .../registry/core/secret/storage/storage.go | 2 - .../registry/core/service/storage/storage.go | 2 - .../core/serviceaccount/storage/storage.go | 2 - .../extensions/daemonset/storage/storage.go | 2 - .../extensions/deployment/storage/storage.go | 2 - .../extensions/ingress/storage/storage.go | 2 - .../networkpolicy/storage/storage.go | 2 - .../podsecuritypolicy/storage/storage.go | 2 - .../extensions/replicaset/storage/storage.go | 2 - 
.../thirdpartyresource/storage/storage.go | 2 - .../thirdpartyresourcedata/storage/storage.go | 2 - .../networkpolicy/storage/storage.go | 2 - .../poddisruptionbudget/storage/storage.go | 2 - .../rbac/clusterrole/storage/storage.go | 2 - .../clusterrolebinding/storage/storage.go | 2 - .../pkg/registry/rbac/role/storage/storage.go | 2 - .../rbac/rolebinding/storage/storage.go | 2 - .../settings/podpreset/storage/storage.go | 2 - .../storage/storageclass/storage/storage.go | 2 - .../generic/registry/storage_factory.go | 17 ++-- .../pkg/registry/generic/registry/store.go | 5 - .../pkg/registry/generic/storage_decorator.go | 3 - .../apiserver/pkg/server/options/etcd.go | 69 ++++++++++++- .../pkg/server/options/server_run_options.go | 6 -- 48 files changed, 109 insertions(+), 197 deletions(-) diff --git a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go index a08120464f0d..bf3d3716ee8d 100644 --- a/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go @@ -50,6 +50,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" + serveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/server/options/encryptionconfig" serverstorage "k8s.io/apiserver/pkg/server/storage" aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" @@ -637,8 +638,16 @@ func defaultOptions(s *options.ServerRunOptions) error { } if s.Etcd.EnableWatchCache { glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) - cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) - cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) + sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) + if userSpecified, err := 
serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil { + for resource, size := range userSpecified { + sizes[resource] = size + } + } + s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes) + if err != nil { + return err + } } return nil diff --git a/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go b/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go index e67e683fc443..fe6837d13269 100644 --- a/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go +++ b/vendor/k8s.io/kubernetes/federation/cmd/federation-apiserver/app/server.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/server/filters" + serveroptions "k8s.io/apiserver/pkg/server/options" serverstorage "k8s.io/apiserver/pkg/server/storage" federationv1beta1 "k8s.io/kubernetes/federation/apis/federation/v1beta1" "k8s.io/kubernetes/federation/cmd/federation-apiserver/app/options" @@ -229,8 +230,17 @@ func NonBlockingRun(s *options.ServerRunOptions, stopCh <-chan struct{}) error { // TODO: Move this to generic api server (Need to move the command line flag). 
if s.Etcd.EnableWatchCache { - cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) - cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) + glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) + sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) + if userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil { + for resource, size := range userSpecified { + sizes[resource] = size + } + } + s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes) + if err != nil { + return err + } } m, err := genericConfig.Complete().New("federation", genericapiserver.EmptyDelegate) diff --git a/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go b/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go index 8f8c3be28c19..817db80c30e3 100644 --- a/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go +++ b/vendor/k8s.io/kubernetes/federation/registry/cluster/etcd/etcd.go @@ -25,7 +25,6 @@ import ( "k8s.io/kubernetes/federation/apis/federation" "k8s.io/kubernetes/federation/registry/cluster" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" ) type REST struct { @@ -53,7 +52,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &federation.ClusterList{} }, PredicateFunc: cluster.MatchCluster, DefaultQualifiedResource: federation.Resource("clusters"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusters"), CreateStrategy: cluster.Strategy, UpdateStrategy: cluster.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go index 8e7d570bcb0e..f49d8b849c71 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/registry/admissionregistration/externaladmissionhookconfiguration" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for pod disruption budgets against etcd @@ -42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { }, PredicateFunc: externaladmissionhookconfiguration.MatchExternalAdmissionHookConfiguration, DefaultQualifiedResource: admissionregistration.Resource("externaladmissionhookconfigurations"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("externaladmissionhookconfigurations"), CreateStrategy: externaladmissionhookconfiguration.Strategy, UpdateStrategy: externaladmissionhookconfiguration.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go index becea1ae56be..5e7ec678ee9d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for pod disruption budgets against etcd @@ -42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { }, PredicateFunc: initializerconfiguration.MatchInitializerConfiguration, 
DefaultQualifiedResource: admissionregistration.Resource("initializerconfigurations"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("initializerconfigurations"), CreateStrategy: initializerconfiguration.Strategy, UpdateStrategy: initializerconfiguration.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go index 556d5ef1be5c..06d68c0c8240 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/registry/apps/controllerrevision" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // REST implements a RESTStorage for ControllerRevision @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &apps.ControllerRevisionList{} }, PredicateFunc: controllerrevision.MatchControllerRevision, DefaultQualifiedResource: apps.Resource("controllerrevisions"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("controllerrevisions"), CreateStrategy: controllerrevision.Strategy, UpdateStrategy: controllerrevision.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go index d40f1d14c5e3..45fab6865147 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/apps/statefulset/storage/storage.go @@ -26,7 +26,6 @@ import ( "k8s.io/kubernetes/pkg/api" appsapi "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/registry/apps/statefulset" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // rest implements a RESTStorage for replication controllers against etcd @@ 
-42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &appsapi.StatefulSetList{} }, PredicateFunc: statefulset.MatchStatefulSet, DefaultQualifiedResource: appsapi.Resource("statefulsets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("statefulsets"), CreateStrategy: statefulset.Strategy, UpdateStrategy: statefulset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go index 3421192cd074..3fee2e0a6bd7 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage/storage.go @@ -26,7 +26,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler" - "k8s.io/kubernetes/pkg/registry/cachesize" ) type REST struct { @@ -41,7 +40,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &autoscaling.HorizontalPodAutoscalerList{} }, PredicateFunc: horizontalpodautoscaler.MatchAutoscaler, DefaultQualifiedResource: autoscaling.Resource("horizontalpodautoscalers"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("horizontalpodautoscalers"), CreateStrategy: horizontalpodautoscaler.Strategy, UpdateStrategy: horizontalpodautoscaler.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go index cdc7cac6516e..085e596486bd 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/cronjob/storage/storage.go @@ -26,7 +26,6 @@ import ( "k8s.io/kubernetes/pkg/api" 
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/registry/batch/cronjob" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // REST implements a RESTStorage for scheduled jobs against etcd @@ -42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &batch.CronJobList{} }, PredicateFunc: cronjob.MatchCronJob, DefaultQualifiedResource: batch.Resource("cronjobs"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("cronjobs"), CreateStrategy: cronjob.Strategy, UpdateStrategy: cronjob.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go index aa52d251023c..5fda289ecbd8 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/batch/job/storage/storage.go @@ -26,7 +26,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/registry/batch/job" - "k8s.io/kubernetes/pkg/registry/cachesize" ) // JobStorage includes dummy storage for Job. @@ -57,7 +56,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &batch.JobList{} }, PredicateFunc: job.MatchJob, DefaultQualifiedResource: batch.Resource("jobs"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("jobs"), CreateStrategy: job.Strategy, UpdateStrategy: job.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go index b853cb5e6a8a..6babf5080dce 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/cachesize/cachesize.go @@ -14,65 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -//use for --watch-cache-sizes param of kube-apiserver -//make watch cache size of resources configurable package cachesize import ( - "strconv" - "strings" - - "github.com/golang/glog" -) - -type Resource string - -const ( - APIServices Resource = "apiservices" - CertificateSigningRequests Resource = "certificatesigningrequests" - ClusterRoles Resource = "clusterroles" - ClusterRoleBindings Resource = "clusterrolebindings" - ConfigMaps Resource = "configmaps" - Controllers Resource = "controllers" - Daemonsets Resource = "daemonsets" - Deployments Resource = "deployments" - Endpoints Resource = "endpoints" - HorizontalPodAutoscalers Resource = "horizontalpodautoscalers" - Ingress Resource = "ingress" - PodDisruptionBudget Resource = "poddisruptionbudgets" - StatefulSet Resource = "statefulset" - Jobs Resource = "jobs" - LimitRanges Resource = "limitranges" - Namespaces Resource = "namespaces" - NetworkPolicys Resource = "networkpolicies" - Nodes Resource = "nodes" - PersistentVolumes Resource = "persistentvolumes" - PersistentVolumeClaims Resource = "persistentvolumeclaims" - Pods Resource = "pods" - PodSecurityPolicies Resource = "podsecuritypolicies" - PodTemplates Resource = "podtemplates" - Replicasets Resource = "replicasets" - ResourceQuotas Resource = "resourcequotas" - CronJobs Resource = "cronjobs" - Roles Resource = "roles" - RoleBindings Resource = "rolebindings" - Secrets Resource = "secrets" - ServiceAccounts Resource = "serviceaccounts" - Services Resource = "services" - StorageClasses Resource = "storageclasses" + "k8s.io/apimachinery/pkg/runtime/schema" ) -// TODO: This shouldn't be a global variable. -var watchCacheSizes map[Resource]int - -func init() { - watchCacheSizes = make(map[Resource]int) -} - -func InitializeWatchCacheSizes(expectedRAMCapacityMB int) { - // This is the heuristics that from memory capacity is trying to infer - // the maximum number of nodes in the cluster and set cache sizes based - // on that value. 
+// NewHeuristicWatchCacheSizes returns a map of suggested watch cache sizes based on total +// memory. +func NewHeuristicWatchCacheSizes(expectedRAMCapacityMB int) map[schema.GroupResource]int { // From our documentation, we officially recommend 120GB machines for // 2000 nodes, and we scale from that point. Thus we assume ~60MB of // capacity per node. @@ -83,39 +33,14 @@ func InitializeWatchCacheSizes(expectedRAMCapacityMB int) { // is supposed to have non-default value. // // TODO: Figure out which resource we should have non-default value. - watchCacheSizes[Controllers] = maxInt(5*clusterSize, 100) - watchCacheSizes[Endpoints] = maxInt(10*clusterSize, 1000) - watchCacheSizes[Nodes] = maxInt(5*clusterSize, 1000) - watchCacheSizes[Pods] = maxInt(50*clusterSize, 1000) - watchCacheSizes[Services] = maxInt(5*clusterSize, 1000) - watchCacheSizes[APIServices] = maxInt(5*clusterSize, 1000) -} - -func SetWatchCacheSizes(cacheSizes []string) { - for _, c := range cacheSizes { - tokens := strings.Split(c, "#") - if len(tokens) != 2 { - glog.Errorf("invalid value of watch cache capabilities: %s", c) - continue - } - - size, err := strconv.Atoi(tokens[1]) - if err != nil { - glog.Errorf("invalid size of watch cache capabilities: %s", c) - continue - } - - watchCacheSizes[Resource(strings.ToLower(tokens[0]))] = size - } -} - -// GetWatchCacheSizeByResource returns the configured watch cache size for the given resource. -// A nil value means to use a default size, zero means to disable caching. 
-func GetWatchCacheSizeByResource(resource string) (ret *int) { // TODO this should use schema.GroupResource for lookups - if value, found := watchCacheSizes[Resource(resource)]; found { - return &value - } - return nil + watchCacheSizes := make(map[schema.GroupResource]int) + watchCacheSizes[schema.GroupResource{Resource: "replicationcontrollers"}] = maxInt(5*clusterSize, 100) + watchCacheSizes[schema.GroupResource{Resource: "endpoints"}] = maxInt(10*clusterSize, 1000) + watchCacheSizes[schema.GroupResource{Resource: "nodes"}] = maxInt(5*clusterSize, 1000) + watchCacheSizes[schema.GroupResource{Resource: "pods"}] = maxInt(50*clusterSize, 1000) + watchCacheSizes[schema.GroupResource{Resource: "services"}] = maxInt(5*clusterSize, 1000) + watchCacheSizes[schema.GroupResource{Resource: "apiservices", Group: "apiregistration.k8s.io"}] = maxInt(5*clusterSize, 1000) + return watchCacheSizes } func maxInt(a, b int) int { diff --git a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go index 2da56ee106fb..2f6cfdab0fc9 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/certificates/certificates/storage/storage.go @@ -24,7 +24,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/certificates" - "k8s.io/kubernetes/pkg/registry/cachesize" csrregistry "k8s.io/kubernetes/pkg/registry/certificates/certificates" ) @@ -41,7 +40,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Approva NewListFunc: func() runtime.Object { return &certificates.CertificateSigningRequestList{} }, PredicateFunc: csrregistry.Matcher, DefaultQualifiedResource: certificates.Resource("certificatesigningrequests"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("certificatesigningrequests"), CreateStrategy: 
csrregistry.Strategy, UpdateStrategy: csrregistry.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go index 8aed341b7c0f..a9ebbfa4da1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/configmap/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/configmap" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.ConfigMapList{} }, PredicateFunc: configmap.MatchConfigMap, DefaultQualifiedResource: api.Resource("configmaps"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("configmaps"), CreateStrategy: configmap.Strategy, UpdateStrategy: configmap.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go index 44abfc437a81..fd38ad5a7281 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/endpoint" ) @@ -38,7 +37,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.EndpointsList{} }, PredicateFunc: endpoint.MatchEndpoints, DefaultQualifiedResource: api.Resource("endpoints"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("endpoints"), CreateStrategy: endpoint.Strategy, 
UpdateStrategy: endpoint.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go index abc030be1fba..44507d307b99 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/event/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/event" ) @@ -51,7 +50,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter, ttl uint64) *REST { return ttl, nil }, DefaultQualifiedResource: resource, - WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: event.Strategy, UpdateStrategy: event.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go index ba29960e3624..7cf050264708 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/limitrange/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/limitrange" ) @@ -38,7 +37,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.LimitRangeList{} }, PredicateFunc: limitrange.MatchLimitRange, DefaultQualifiedResource: api.Resource("limitranges"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("limitranges"), CreateStrategy: limitrange.Strategy, UpdateStrategy: limitrange.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go index 0b3edf132d5a..da389c698d94 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/namespace/storage/storage.go @@ -31,7 +31,6 @@ import ( "k8s.io/apiserver/pkg/storage" storageerr "k8s.io/apiserver/pkg/storage/errors" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/namespace" ) @@ -59,7 +58,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Finaliz NewListFunc: func() runtime.Object { return &api.NamespaceList{} }, PredicateFunc: namespace.MatchNamespace, DefaultQualifiedResource: api.Resource("namespaces"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("namespaces"), CreateStrategy: namespace.Strategy, UpdateStrategy: namespace.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go index dc5be3dc7051..e894bb5051de 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/node/storage/storage.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubelet/client" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/node" noderest "k8s.io/kubernetes/pkg/registry/core/node/rest" ) @@ -77,7 +76,6 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, kubeletClientConfig client NewListFunc: func() runtime.Object { return &api.NodeList{} }, PredicateFunc: node.MatchNode, DefaultQualifiedResource: api.Resource("nodes"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("nodes"), CreateStrategy: node.Strategy, UpdateStrategy: node.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go index 7c69f7eb8129..a24aa391039d 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage/storage.go @@ -24,7 +24,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/persistentvolume" ) @@ -40,7 +39,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.PersistentVolumeList{} }, PredicateFunc: persistentvolume.MatchPersistentVolumes, DefaultQualifiedResource: api.Resource("persistentvolumes"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("persistentvolumes"), CreateStrategy: persistentvolume.Strategy, UpdateStrategy: persistentvolume.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go index d5295dbf631b..78c6a1e04111 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage/storage.go @@ -24,7 +24,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim" ) @@ -40,7 +39,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.PersistentVolumeClaimList{} }, PredicateFunc: persistentvolumeclaim.MatchPersistentVolumeClaim, DefaultQualifiedResource: api.Resource("persistentvolumeclaims"), - WatchCacheSize: 
cachesize.GetWatchCacheSizeByResource("persistentvolumeclaims"), CreateStrategy: persistentvolumeclaim.Strategy, UpdateStrategy: persistentvolumeclaim.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go index 83b20d588ef5..6cb38101d216 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/pod/storage/storage.go @@ -38,7 +38,6 @@ import ( "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/pod" podrest "k8s.io/kubernetes/pkg/registry/core/pod/rest" ) @@ -71,7 +70,6 @@ func NewStorage(optsGetter generic.RESTOptionsGetter, k client.ConnectionInfoGet NewListFunc: func() runtime.Object { return &api.PodList{} }, PredicateFunc: pod.MatchPod, DefaultQualifiedResource: api.Resource("pods"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("pods"), CreateStrategy: pod.Strategy, UpdateStrategy: pod.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go index 13d1d4365945..cee6f0d71a4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/podtemplate/storage/storage.go @@ -21,7 +21,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/podtemplate" ) @@ -37,7 +36,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.PodTemplateList{} }, PredicateFunc: podtemplate.MatchPodTemplate, 
DefaultQualifiedResource: api.Resource("podtemplates"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podtemplates"), CreateStrategy: podtemplate.Strategy, UpdateStrategy: podtemplate.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go index 1d0bb1e76e98..c9fce60d8c8a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage/storage.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/autoscaling/validation" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/replicationcontroller" ) @@ -66,7 +65,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.ReplicationControllerList{} }, PredicateFunc: replicationcontroller.MatchController, DefaultQualifiedResource: api.Resource("replicationcontrollers"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("replicationcontrollers"), CreateStrategy: replicationcontroller.Strategy, UpdateStrategy: replicationcontroller.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go index 90638de8a6fc..d5d46241cf71 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/resourcequota/storage/storage.go @@ -24,7 +24,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/resourcequota" ) @@ -40,7 +39,6 @@ func 
NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.ResourceQuotaList{} }, PredicateFunc: resourcequota.MatchResourceQuota, DefaultQualifiedResource: api.Resource("resourcequotas"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("resourcequotas"), CreateStrategy: resourcequota.Strategy, UpdateStrategy: resourcequota.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go index 8dc5592eed0f..fe823f7aad97 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/secret/storage/storage.go @@ -21,7 +21,6 @@ import ( "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/secret" ) @@ -37,7 +36,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.SecretList{} }, PredicateFunc: secret.Matcher, DefaultQualifiedResource: api.Resource("secrets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("secrets"), CreateStrategy: secret.Strategy, UpdateStrategy: secret.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go index 6192b2842c8b..5726ac270d8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/service/storage/storage.go @@ -24,7 +24,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/service" ) @@ -40,7 +39,6 @@ func NewREST(optsGetter 
generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &api.ServiceList{} }, PredicateFunc: service.MatchServices, DefaultQualifiedResource: api.Resource("services"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("services"), CreateStrategy: service.Strategy, UpdateStrategy: service.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go index 3c9b1d26e94e..58133a41eb9b 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/core/serviceaccount" ) @@ -38,7 +37,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &api.ServiceAccountList{} }, PredicateFunc: serviceaccount.Matcher, DefaultQualifiedResource: api.Resource("serviceaccounts"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("serviceaccounts"), CreateStrategy: serviceaccount.Strategy, UpdateStrategy: serviceaccount.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go index 86e7e45c2fb1..28cdde76a747 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage/storage.go @@ -25,7 +25,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/daemonset" ) @@ 
-42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &extensions.DaemonSetList{} }, PredicateFunc: daemonset.MatchDaemonSet, DefaultQualifiedResource: extensions.Resource("daemonsets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("daemonsets"), CreateStrategy: daemonset.Strategy, UpdateStrategy: daemonset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go index d747ecad8af6..133fba16d810 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/deployment/storage/storage.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/deployment" ) @@ -68,7 +67,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, *Rollbac NewListFunc: func() runtime.Object { return &extensions.DeploymentList{} }, PredicateFunc: deployment.MatchDeployment, DefaultQualifiedResource: extensions.Resource("deployments"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("deployments"), CreateStrategy: deployment.Strategy, UpdateStrategy: deployment.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go index 8b891a6209ad..55c8befe93bb 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/ingress/storage/storage.go @@ -25,7 +25,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - 
"k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/ingress" ) @@ -42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &extensions.IngressList{} }, PredicateFunc: ingress.MatchIngress, DefaultQualifiedResource: extensions.Resource("ingresses"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("ingresses"), CreateStrategy: ingress.Strategy, UpdateStrategy: ingress.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go index 8bb412a4a0dc..a6d484b99f51 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/networkpolicy/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" extensionsapi "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/networkpolicy" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensionsapi.NetworkPolicyList{} }, PredicateFunc: networkpolicy.MatchNetworkPolicy, DefaultQualifiedResource: extensionsapi.Resource("networkpolicies"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("networkpolicies"), CreateStrategy: networkpolicy.Strategy, UpdateStrategy: networkpolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go index eff8d11b76e5..699bb0f9a14f 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go +++ 
b/vendor/k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensions.PodSecurityPolicyList{} }, PredicateFunc: podsecuritypolicy.MatchPodSecurityPolicy, DefaultQualifiedResource: extensions.Resource("podsecuritypolicies"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podsecuritypolicies"), CreateStrategy: podsecuritypolicy.Strategy, UpdateStrategy: podsecuritypolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go index b10c455ae911..90314a840219 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage/storage.go @@ -31,7 +31,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" extvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/replicaset" ) @@ -65,7 +64,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { NewListFunc: func() runtime.Object { return &extensions.ReplicaSetList{} }, PredicateFunc: replicaset.MatchReplicaSet, DefaultQualifiedResource: extensions.Resource("replicasets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("replicasets"), CreateStrategy: replicaset.Strategy, UpdateStrategy: replicaset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go 
b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go index 120cfdc82061..1910e5f02103 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresource" ) @@ -48,7 +47,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceList{} }, PredicateFunc: thirdpartyresource.Matcher, DefaultQualifiedResource: resource, - WatchCacheSize: cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: thirdpartyresource.Strategy, UpdateStrategy: thirdpartyresource.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go index ccf18ecb2630..e89477a33d5a 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata/storage/storage.go @@ -30,7 +30,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/extensions/thirdpartyresourcedata" ) @@ -104,7 +103,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter, group, kind string) *REST { NewListFunc: func() runtime.Object { return &extensions.ThirdPartyResourceDataList{} }, PredicateFunc: thirdpartyresourcedata.Matcher, DefaultQualifiedResource: resource, - WatchCacheSize: 
cachesize.GetWatchCacheSizeByResource(resource.Resource), CreateStrategy: thirdpartyresourcedata.Strategy, UpdateStrategy: thirdpartyresourcedata.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go index e29b501b547d..48823519c2d5 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" networkingapi "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/networking/networkpolicy" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &networkingapi.NetworkPolicyList{} }, PredicateFunc: networkpolicy.Matcher, DefaultQualifiedResource: networkingapi.Resource("networkpolicies"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("networkpolicies"), CreateStrategy: networkpolicy.Strategy, UpdateStrategy: networkpolicy.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go index 2daa230365d1..0686d70893ed 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage/storage.go @@ -25,7 +25,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" policyapi "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget" ) @@ -42,7 +41,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST) { 
NewListFunc: func() runtime.Object { return &policyapi.PodDisruptionBudgetList{} }, PredicateFunc: poddisruptionbudget.MatchPodDisruptionBudget, DefaultQualifiedResource: policyapi.Resource("poddisruptionbudgets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("poddisruptionbudgets"), CreateStrategy: poddisruptionbudget.Strategy, UpdateStrategy: poddisruptionbudget.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go index e434280a4098..b83a79267f98 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/clusterrole" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.ClusterRoleList{} }, PredicateFunc: clusterrole.Matcher, DefaultQualifiedResource: rbac.Resource("clusterroles"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusterroles"), CreateStrategy: clusterrole.Strategy, UpdateStrategy: clusterrole.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go index 2cb178ca63f0..790c34df6888 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/registry/cachesize" 
"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.ClusterRoleBindingList{} }, PredicateFunc: clusterrolebinding.Matcher, DefaultQualifiedResource: rbac.Resource("clusterrolebindings"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("clusterrolebindings"), CreateStrategy: clusterrolebinding.Strategy, UpdateStrategy: clusterrolebinding.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go index 18f178672527..4744420d0e63 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/role/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/rbac/role" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.RoleList{} }, PredicateFunc: role.Matcher, DefaultQualifiedResource: rbac.Resource("roles"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("roles"), CreateStrategy: role.Strategy, UpdateStrategy: role.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go index 3ccf4f72cf6e..cafd8ce47973 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/registry/cachesize" 
"k8s.io/kubernetes/pkg/registry/rbac/rolebinding" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &rbac.RoleBindingList{} }, PredicateFunc: rolebinding.Matcher, DefaultQualifiedResource: rbac.Resource("rolebindings"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("rolebindings"), CreateStrategy: rolebinding.Strategy, UpdateStrategy: rolebinding.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go index 4d1c7ca54b50..78ffe711598e 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/settings/podpreset/storage/storage.go @@ -22,7 +22,6 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/kubernetes/pkg/api" settingsapi "k8s.io/kubernetes/pkg/apis/settings" - "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/settings/podpreset" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &settingsapi.PodPresetList{} }, PredicateFunc: podpreset.Matcher, DefaultQualifiedResource: settingsapi.Resource("podpresets"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("podpresets"), CreateStrategy: podpreset.Strategy, UpdateStrategy: podpreset.Strategy, diff --git a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go index 3dcb18a301f7..7e8d293eb3a0 100644 --- a/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go +++ b/vendor/k8s.io/kubernetes/pkg/registry/storage/storageclass/storage/storage.go @@ -23,7 +23,6 @@ import ( "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/api" storageapi "k8s.io/kubernetes/pkg/apis/storage" - 
"k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/storage/storageclass" ) @@ -39,7 +38,6 @@ func NewREST(optsGetter generic.RESTOptionsGetter) *REST { NewListFunc: func() runtime.Object { return &storageapi.StorageClassList{} }, PredicateFunc: storageclass.MatchStorageClasses, DefaultQualifiedResource: storageapi.Resource("storageclasses"), - WatchCacheSize: cachesize.GetWatchCacheSizeByResource("storageclass"), CreateStrategy: storageclass.Strategy, UpdateStrategy: storageclass.Strategy, diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index 9554e5234996..c193f01cc2a8 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -17,6 +17,8 @@ limitations under the License. package registry import ( + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage" @@ -26,11 +28,10 @@ import ( ) // Creates a cacher based given storageConfig. 
-func StorageWithCacher(defaultCapacity int) generic.StorageDecorator { +func StorageWithCacher(capacity int) generic.StorageDecorator { return func( copier runtime.ObjectCopier, storageConfig *storagebackend.Config, - requestedSize *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), @@ -38,15 +39,13 @@ func StorageWithCacher(defaultCapacity int) generic.StorageDecorator { getAttrsFunc storage.AttrFunc, triggerFunc storage.TriggerPublisherFunc) (storage.Interface, factory.DestroyFunc) { - capacity := defaultCapacity - if requestedSize != nil && *requestedSize == 0 { - panic("StorageWithCacher must not be called with zero cache size") - } - if requestedSize != nil { - capacity = *requestedSize + s, d := generic.NewRawStorage(storageConfig) + if capacity == 0 { + glog.V(5).Infof("Storage caching is disabled for %T", objectType) + return s, d } + glog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) - s, d := generic.NewRawStorage(storageConfig) // TODO: we would change this later to make storage always have cacher and hide low level KV layer inside. // Currently it has two layers of same storage interface -- cacher and low level kv. cacherConfig := storage.CacherConfig{ diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 10460a46c1a3..0ea0d1b161d2 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -167,10 +167,6 @@ type Store struct { Storage storage.Interface // Called to cleanup clients used by the underlying Storage; optional. DestroyFunc func() - // Maximum size of the watch history cached in memory, in number of entries. 
- // This value is ignored if Storage is non-nil. Nil is replaced with a default value. - // A zero integer will disable caching. - WatchCacheSize *int } // Note: the rest.StandardStorage interface aggregates the common REST verbs @@ -1313,7 +1309,6 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { e.Storage, e.DestroyFunc = opts.Decorator( e.Copier, opts.StorageConfig, - e.WatchCacheSize, e.NewFunc(), prefix, keyFunc, diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index ab9aeb42adcf..6c65230f35be 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -26,11 +26,9 @@ import ( // StorageDecorator is a function signature for producing a storage.Interface // and an associated DestroyFunc from given parameters. -// A zero capacity means to disable caching, nil means to use a default. 
type StorageDecorator func( copier runtime.ObjectCopier, config *storagebackend.Config, - capacity *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), @@ -43,7 +41,6 @@ type StorageDecorator func( func UndecoratedStorage( copier runtime.ObjectCopier, config *storagebackend.Config, - capacity *int, objectType runtime.Object, resourcePrefix string, keyFunc func(obj runtime.Object) (string, error), diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index bdaa0f2af674..7386637bc976 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -18,6 +18,8 @@ package options import ( "fmt" + "strconv" + "strings" "github.com/spf13/pflag" @@ -45,6 +47,8 @@ type EtcdOptions struct { EnableWatchCache bool // Set DefaultWatchCacheSize to zero to disable watch caches for those resources that have no explicit cache size set DefaultWatchCacheSize int + // WatchCacheSizes represents override to a given resource + WatchCacheSizes []string } func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { @@ -83,10 +87,17 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) { "Enables the generic garbage collector. MUST be synced with the corresponding flag "+ "of the kube-controller-manager.") - // TODO: enable cache in integration tests. fs.BoolVar(&s.EnableWatchCache, "watch-cache", s.EnableWatchCache, "Enable watch caching in the apiserver") + fs.IntVar(&s.DefaultWatchCacheSize, "default-watch-cache-size", s.DefaultWatchCacheSize, + "Default watch cache size. 
If zero, watch cache will be disabled for resources that do not have a default watch size set.") + + fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, ""+ + "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ + "The individual override format: resource#size, where size is a number. It takes effect "+ + "when watch-cache is enabled.") + fs.StringVar(&s.StorageConfig.Type, "storage-backend", s.StorageConfig.Type, "The storage backend for persistence. Options: 'etcd3' (default), 'etcd2'.") @@ -138,7 +149,15 @@ func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) ResourcePrefix: resource.Group + "/" + resource.Resource, } if f.Options.EnableWatchCache { - ret.Decorator = genericregistry.StorageWithCacher(f.Options.DefaultWatchCacheSize) + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + cacheSize, ok := sizes[resource] + if !ok { + cacheSize = f.Options.DefaultWatchCacheSize + } + ret.Decorator = genericregistry.StorageWithCacher(cacheSize) } return ret, nil } @@ -162,8 +181,52 @@ func (f *storageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR ResourcePrefix: f.StorageFactory.ResourcePrefix(resource), } if f.Options.EnableWatchCache { - ret.Decorator = genericregistry.StorageWithCacher(f.Options.DefaultWatchCacheSize) + sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) + if err != nil { + return generic.RESTOptions{}, err + } + cacheSize, ok := sizes[resource] + if !ok { + cacheSize = f.Options.DefaultWatchCacheSize + } + ret.Decorator = genericregistry.StorageWithCacher(cacheSize) } return ret, nil } + +// ParseWatchCacheSizes turns a list of cache size values into a map of group resources +// to requested sizes. 
+func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { + watchCacheSizes := make(map[schema.GroupResource]int) + for _, c := range cacheSizes { + tokens := strings.Split(c, "#") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid value of watch cache size: %s", c) + } + + size, err := strconv.Atoi(tokens[1]) + if err != nil { + return nil, fmt.Errorf("invalid size of watch cache size: %s", c) + } + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) + } + + watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size + } + return watchCacheSizes, nil +} + +// WriteWatchCacheSizes turns a map of cache size values into a list of string specifications. +func WriteWatchCacheSizes(watchCacheSizes map[schema.GroupResource]int) ([]string, error) { + var cacheSizes []string + + for resource, size := range watchCacheSizes { + if size < 0 { + return nil, fmt.Errorf("watch cache size cannot be negative for resource %s", resource) + } + cacheSizes = append(cacheSizes, fmt.Sprintf("%s#%d", resource.String(), size)) + } + return cacheSizes, nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go index 57ae12e9d10d..a8a62dfa60f6 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -43,7 +43,6 @@ type ServerRunOptions struct { RequestTimeout time.Duration MinRequestTimeout int TargetRAMMB int - WatchCacheSizes []string } func NewServerRunOptions() *ServerRunOptions { @@ -137,10 +136,5 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") - fs.StringSliceVar(&s.WatchCacheSizes, 
"watch-cache-sizes", s.WatchCacheSizes, ""+ - "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. "+ - "The individual override format: resource#size, where size is a number. It takes effect "+ - "when watch-cache is enabled.") - utilfeature.DefaultFeatureGate.AddFlag(fs) } From f19116c901ced53ce5422bcaefe146309e558e96 Mon Sep 17 00:00:00 2001 From: Jim Minter Date: Tue, 5 Sep 2017 15:51:46 -0500 Subject: [PATCH 14/27] UPSTREAM: 51972: ProducesObject should only update the returned API object resource documentation --- .../apiserver/pkg/endpoints/installer.go | 32 +++++++++++-------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go index c82f4f720de0..7bf87de5c036 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/installer.go @@ -554,9 +554,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } for _, action := range actions { - versionedObject := storageMeta.ProducesObject(action.Verb) - if versionedObject == nil { - versionedObject = defaultVersionedObject + producedObject := storageMeta.ProducesObject(action.Verb) + if producedObject == nil { + producedObject = defaultVersionedObject } reqScope.Namer = action.Namer @@ -625,8 +625,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). 
- Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + Writes(producedObject) if isGetterWithOptions { if err := addObjectParams(ws, route, versionedGetOptions); err != nil { return nil, err @@ -682,9 +682,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). - Reads(versionedObject). - Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "PATCH": // Partially update a resource @@ -699,9 +699,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Consumes(string(types.JSONPatchType), string(types.MergePatchType), string(types.StrategicMergePatchType)). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). + Returns(http.StatusOK, "OK", producedObject). Reads(metav1.Patch{}). - Writes(versionedObject) + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "POST": // Create a resource. @@ -722,9 +722,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). - Returns(http.StatusOK, "OK", versionedObject). - Reads(versionedObject). 
- Writes(versionedObject) + Returns(http.StatusOK, "OK", producedObject). + Reads(defaultVersionedObject). + Writes(producedObject) addParams(route, action.Params) routes = append(routes, route) case "DELETE": // Delete a resource. @@ -819,6 +819,10 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag routes = append(routes, buildProxyRoute(ws, "OPTIONS", a.prefix, action.Path, kind, resource, subresource, namespaced, requestScope, hasSubresource, action.Params, proxyHandler, operationSuffix)) case "CONNECT": for _, method := range connecter.ConnectMethods() { + connectProducedObject := storageMeta.ProducesObject(method) + if connectProducedObject == nil { + connectProducedObject = "string" + } doc := "connect " + method + " requests to " + kind if hasSubresource { doc = "connect " + method + " requests to " + subresource + " of " + kind @@ -830,7 +834,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). Produces("*/*"). Consumes("*/*"). 
- Writes("string") + Writes(connectProducedObject) if versionedConnectOptions != nil { if err := addObjectParams(ws, route, versionedConnectOptions); err != nil { return nil, err From 6436aca65433a01269b10d52a32615a14fe3e05b Mon Sep 17 00:00:00 2001 From: Matt Rogers Date: Fri, 11 Aug 2017 15:01:00 -0400 Subject: [PATCH 15/27] UPSTREAM: 51035: Show events when describing service accounts Signed-off-by: Matt Rogers --- .../pkg/printers/internalversion/describe.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go index 0bf823370850..38ce792d4bff 100644 --- a/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go +++ b/vendor/k8s.io/kubernetes/pkg/printers/internalversion/describe.go @@ -2097,10 +2097,15 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett } } - return describeServiceAccount(serviceAccount, tokens, missingSecrets) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Core().Events(namespace).Search(api.Scheme, serviceAccount) + } + + return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) } -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String) (string, error) { +func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name) @@ -2152,6 +2157,10 @@ func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Sec w.WriteLine() } + if events != nil { + DescribeEvents(events, w) + } + return nil }) } From 6c3b0f5e0665c94f91bfd463dc7704dd934802a4 Mon Sep 17 00:00:00 2001 From: Seth Jennings Date: Thu, 28 Sep 2017 
Kubelet then eventually reads the status out of that cache, but in the meantime the status could have been changed by PLEG.
To ensure consistency for properties of dead pods, copy an old status update's IP address over to the new status update if (a) the new status update's IP is missing and (b) all sandboxes of the pod are dead/not-ready (eg, no possibility for a valid IP from the sandbox). Fixes: https://github.com/kubernetes/kubernetes/issues/47265 Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1449373 --- .../k8s.io/kubernetes/pkg/kubelet/pleg/BUILD | 1 + .../kubernetes/pkg/kubelet/pleg/generic.go | 44 +++++++++++++++ .../pkg/kubelet/pleg/generic_test.go | 55 +++++++++++++++++++ 3 files changed, 100 insertions(+) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD index 6ad5cae0e5ae..e907644f6133 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/BUILD @@ -17,6 +17,7 @@ go_library( ], tags = ["automanaged"], deps = [ + "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go index 8e447f0ef85b..6c6c980c3d8b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/metrics" ) @@ -329,6 +330,41 @@ func (g *GenericPLEG) cacheEnabled() bool { return g.cache != nil } +// Preserve an older cached status' pod IP if the new status has no pod IP +// and its sandboxes have exited +func (g *GenericPLEG) getPodIP(pid types.UID, status 
*kubecontainer.PodStatus) string { + if status.IP != "" { + return status.IP + } + + oldStatus, err := g.cache.Get(pid) + if err != nil || oldStatus.IP == "" { + return "" + } + + for _, sandboxStatus := range status.SandboxStatuses { + // If at least one sandbox is ready, then use this status update's pod IP + if sandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY { + return status.IP + } + } + + if len(status.SandboxStatuses) == 0 { + // Without sandboxes (which built-in runtimes like rkt don't report) + // look at all the container statuses, and if any containers are + // running then use the new pod IP + for _, containerStatus := range status.ContainerStatuses { + if containerStatus.State == kubecontainer.ContainerStateCreated || containerStatus.State == kubecontainer.ContainerStateRunning { + return status.IP + } + } + } + + // For pods with no ready containers or sandboxes (like exited pods) + // use the old status' pod IP + return oldStatus.IP +} + func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { if pod == nil { // The pod is missing in the current relist. This means that @@ -343,6 +379,14 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { // all containers again. status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err) + if err == nil { + // Preserve the pod IP across cache updates if the new IP is empty. + // When a pod is torn down, kubelet may race with PLEG and retrieve + // a pod status after network teardown, but the kubernetes API expects + // the completed pod's IP to be available after the pod is dead. 
+ status.IP = g.getPodIP(pid, status) + } + g.cache.Set(pod.ID, status, err, timestamp) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go index f5fd5635ca29..468f98bffaee 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pleg/generic_test.go @@ -496,3 +496,58 @@ func TestRelistingWithSandboxes(t *testing.T) { actual = getEventsFromChannel(ch) verifyEvents(t, expected, actual) } + +func TestRelistIPChange(t *testing.T) { + pleg, runtimeMock := newTestGenericPLEGWithRuntimeMock() + ch := pleg.Watch() + + id := types.UID("test-pod-0") + cState := kubecontainer.ContainerStateRunning + container := createTestContainer("c0", cState) + pod := &kubecontainer.Pod{ + ID: id, + Containers: []*kubecontainer.Container{container}, + } + ipAddr := "192.168.1.5/24" + status := &kubecontainer.PodStatus{ + ID: id, + IP: ipAddr, + ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: cState}}, + } + event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID} + + runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once() + runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once() + + pleg.relist() + actualEvents := getEventsFromChannel(ch) + actualStatus, actualErr := pleg.cache.Get(pod.ID) + assert.Equal(t, status, actualStatus, "test0") + assert.Nil(t, actualErr, "test0") + assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents) + + // Clear the IP address and mark the container terminated + container = createTestContainer("c0", kubecontainer.ContainerStateExited) + pod = &kubecontainer.Pod{ + ID: id, + Containers: []*kubecontainer.Container{container}, + } + status = &kubecontainer.PodStatus{ + ID: id, + ContainerStatuses: []*kubecontainer.ContainerStatus{{ID: container.ID, State: kubecontainer.ContainerStateExited}}, + } + event = 
&PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID} + runtimeMock.On("GetPods", true).Return([]*kubecontainer.Pod{pod}, nil).Once() + runtimeMock.On("GetPodStatus", pod.ID, "", "").Return(status, nil).Once() + + pleg.relist() + actualEvents = getEventsFromChannel(ch) + actualStatus, actualErr = pleg.cache.Get(pod.ID) + // Must copy status to compare since its pointer gets passed through all + // the way to the event + statusCopy := *status + statusCopy.IP = ipAddr + assert.Equal(t, &statusCopy, actualStatus, "test0") + assert.Nil(t, actualErr, "test0") + assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents) +} From 6d145c9f9f7605b1a9c1f1ab0ae7a51270dbbee0 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 21 Sep 2017 12:52:39 -0500 Subject: [PATCH 18/27] UPSTREAM: 52864: dockershim: fine-tune network-ready handling on sandbox teardown and removal If sandbox teardown results in an error, GC will periodically attempt to again remove the sandbox. Until the sandbox is removed, pod sandbox status calls will attempt to enter the pod's namespace and retrieve the pod IP, but the first teardown attempt may have already removed the network namespace, resulting in a pointless log error message that the network namespace doesn't exist, or that nsenter can't find eth0. The network-ready mechanism originally attempted to suppress those messages by ensuring that pod sandbox status skipped network checks when networking was already torn down, but unfortunately the ready value was cleared too early. Also, don't tear down the pod network multiple times if the first time we tore it down, it succeeded. 
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1434950 --- .../pkg/kubelet/dockershim/docker_sandbox.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go index 70c5bcb0ded1..d1215bc1b150 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go @@ -226,7 +226,9 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error { // since it is stopped. With empty network namespcae, CNI bridge plugin will conduct best // effort clean up and will not return error. errList := []error{} - if needNetworkTearDown { + ready, ok := ds.getNetworkReady(podSandboxID) + if needNetworkTearDown && (ready || !ok) { + // Only tear down the pod network if we haven't done so already cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID) err := ds.network.TearDownPod(namespace, name, cID) if err == nil { @@ -269,12 +271,15 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error { } // Remove the sandbox container. - if err := ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}); err != nil && !libdocker.IsContainerNotFoundError(err) { + err = ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true}) + if err == nil || libdocker.IsContainerNotFoundError(err) { + // Only clear network ready when the sandbox has actually been + // removed from docker or doesn't exist + ds.clearNetworkReady(podSandboxID) + } else { errs = append(errs, err) } - ds.clearNetworkReady(podSandboxID) - // Remove the checkpoint of the sandbox. 
if err := ds.checkpointHandler.RemoveCheckpoint(podSandboxID); err != nil { errs = append(errs, err) From d64738c2baeeb7a3d6bcf53f51b53f037d92b49a Mon Sep 17 00:00:00 2001 From: Seth Jennings Date: Sun, 1 Oct 2017 22:42:30 -0500 Subject: [PATCH 19/27] UPSTREAM: 53318: create separate transports for liveness and readiness probes --- .../kubernetes/pkg/kubelet/prober/prober.go | 39 ++++++++++++------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index 9cff34a208bb..37a94b06ac58 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -46,10 +46,14 @@ const maxProbeRetries = 3 // Prober helps to check the liveness/readiness of a container. type prober struct { - exec execprobe.ExecProber - http httprobe.HTTPProber - tcp tcprobe.TCPProber - runner kubecontainer.ContainerCommandRunner + exec execprobe.ExecProber + // probe types needs different httprobe instances so they don't + // share a connection pool which can cause collsions to the + // same host:port and transient failures. See #49740. 
+ readinessHttp httprobe.HTTPProber + livenessHttp httprobe.HTTPProber + tcp tcprobe.TCPProber + runner kubecontainer.ContainerCommandRunner refManager *kubecontainer.RefManager recorder record.EventRecorder @@ -63,12 +67,13 @@ func newProber( recorder record.EventRecorder) *prober { return &prober{ - exec: execprobe.New(), - http: httprobe.New(), - tcp: tcprobe.New(), - runner: runner, - refManager: refManager, - recorder: recorder, + exec: execprobe.New(), + readinessHttp: httprobe.New(), + livenessHttp: httprobe.New(), + tcp: tcprobe.New(), + runner: runner, + refManager: refManager, + recorder: recorder, } } @@ -90,7 +95,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c return results.Success, nil } - result, output, err := pb.runProbeWithRetries(probeSpec, pod, status, container, containerID, maxProbeRetries) + result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) if err != nil || result != probe.Success { // Probe failed in one way or another. ref, hasRef := pb.refManager.GetRef(containerID) @@ -116,12 +121,12 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. 
-func (pb *prober) runProbeWithRetries(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { +func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { var err error var result probe.Result var output string for i := 0; i < retries; i++ { - result, output, err = pb.runProbe(p, pod, status, container, containerID) + result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID) if err == nil { return result, output, nil } @@ -139,7 +144,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header { return headers } -func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { +func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) @@ -161,7 +166,11 @@ func (pb *prober) runProbe(p *v1.Probe, pod *v1.Pod, status v1.PodStatus, contai url := formatURL(scheme, host, port, path) headers := buildHeader(p.HTTPGet.HTTPHeaders) glog.V(4).Infof("HTTP-Probe Headers: %v", headers) - return pb.http.Probe(url, headers, timeout) + if probeType == liveness { + return pb.livenessHttp.Probe(url, headers, timeout) + } else { // readiness + return pb.readinessHttp.Probe(url, headers, timeout) + } } if p.TCPSocket != nil { port, err := extractPort(p.TCPSocket.Port, container) From ddf7b9197f178fcdbad2fed0acdd2025d7916457 Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 28 Sep 2017 14:09:41 -0400 
Subject: [PATCH 20/27] UPSTREAM: : update namespace lifecycle to allow review APIs --- .../plugin/namespace/lifecycle/admission.go | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index d371a09cc81d..4e4e04697f32 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -222,13 +222,19 @@ func (l *lifecycle) Validate() error { // accessReviewResources are resources which give a view into permissions in a namespace. Users must be allowed to create these // resources because returning "not found" errors allows someone to search for the "people I'm going to fire in 2017" namespace. 
var accessReviewResources = map[schema.GroupResource]bool{ - {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "subjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "localsubjectaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "resourceaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "localresourceaccessreviews"}: true, - schema.GroupResource{Group: "", Resource: "selfsubjectrulesreviews"}: true, - schema.GroupResource{Group: "", Resource: "subjectrulesreviews"}: true, + {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "subjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "localsubjectaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "resourceaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "localresourceaccessreviews"}: true, + schema.GroupResource{Group: "", Resource: "selfsubjectrulesreviews"}: true, + schema.GroupResource{Group: "", Resource: "subjectrulesreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "subjectaccessreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "localsubjectaccessreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "resourceaccessreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "localresourceaccessreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "selfsubjectrulesreviews"}: true, + schema.GroupResource{Group: "authorization.openshift.io", Resource: "subjectrulesreviews"}: true, } func isAccessReview(a admission.Attributes) bool { From 0770a4ae8c532ddc282ae15fcb71830c627694c3 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 21:41:23 +0200 Subject: [PATCH 21/27] UPSTREAM: : aggregate 
openapi through servers. 1.8 should fix this for CRD
-func (s Swagger) Install(c *restful.Container) { - s.Config.WebServices = c.RegisteredWebServices() - swagger.RegisterSwaggerService(*s.Config, c) +func (s Swagger) Install(webserviceContainers []*restful.Container, c *restful.Container) { + swagger.RegisterSwaggerService(*s.Config, webserviceContainers, c) } From 7dd3445e752cb06432f9a2a4f2728dd76add85ca Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Tue, 18 Jul 2017 22:01:19 -0400 Subject: [PATCH 22/27] UPSTREAM: : Adapt etcd testing util to v3.2.1 Drop when upstream is on a compatible level of etcd 3.2.1 --- .../src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index 83fcbd5049c4..cdbbd28a7455 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -169,6 +169,7 @@ func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer if err != nil { t.Fatal(err) } + m.AuthToken = "simple" clusterStr := fmt.Sprintf("%s=http://%s", name, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) @@ -190,7 +191,7 @@ func (m *EtcdTestServer) launch(t *testing.T) error { if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) + m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { From 822a0e110fac29effab918f4eadf085201aa5349 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Sat, 30 Sep 2017 21:05:44 +0200 Subject: [PATCH 23/27] UPSTREAM: 
docker/distribution: : Azure dependencies --- .../github.com/Azure/azure-sdk-for-go/LICENSE | 202 ++ .../arm/compute/availabilitysets.go | 366 +++ .../azure-sdk-for-go/arm/compute/client.go | 58 + .../azure-sdk-for-go/arm/compute/models.go | 1180 ++++++++ .../arm/compute/usageoperations.go | 136 + .../azure-sdk-for-go/arm/compute/version.go | 43 + .../compute/virtualmachineextensionimages.go | 238 ++ .../arm/compute/virtualmachineextensions.go | 261 ++ .../arm/compute/virtualmachineimages.go | 376 +++ .../arm/compute/virtualmachines.go | 989 +++++++ .../arm/compute/virtualmachinescalesets.go | 1091 ++++++++ .../arm/compute/virtualmachinescalesetvms.go | 688 +++++ .../arm/compute/virtualmachinesizes.go | 111 + .../arm/containerregistry/client.go | 57 + .../arm/containerregistry/models.go | 118 + .../arm/containerregistry/registries.go | 685 +++++ .../arm/containerregistry/version.go | 43 + .../arm/network/applicationgateways.go | 629 +++++ .../azure-sdk-for-go/arm/network/client.go | 130 + .../expressroutecircuitauthorizations.go | 340 +++ .../network/expressroutecircuitpeerings.go | 338 +++ .../arm/network/expressroutecircuits.go | 755 +++++ .../network/expressrouteserviceproviders.go | 128 + .../arm/network/interfaces.go | 813 ++++++ .../arm/network/loadbalancers.go | 416 +++ .../arm/network/localnetworkgateways.go | 342 +++ .../azure-sdk-for-go/arm/network/models.go | 2158 +++++++++++++++ .../arm/network/publicipaddresses.go | 445 +++ .../azure-sdk-for-go/arm/network/routes.go | 335 +++ .../arm/network/routetables.go | 423 +++ .../arm/network/securitygroups.go | 428 +++ .../arm/network/securityrules.go | 350 +++ .../azure-sdk-for-go/arm/network/subnets.go | 357 +++ .../azure-sdk-for-go/arm/network/usages.go | 136 + .../azure-sdk-for-go/arm/network/version.go | 43 + .../virtualnetworkgatewayconnections.go | 595 ++++ .../arm/network/virtualnetworkgateways.go | 482 ++++ .../arm/network/virtualnetworkpeerings.go | 339 +++ .../arm/network/virtualnetworks.go | 484 ++++ 
.../azure-sdk-for-go/arm/storage/accounts.go | 715 +++++ .../azure-sdk-for-go/arm/storage/client.go | 58 + .../azure-sdk-for-go/arm/storage/models.go | 299 ++ .../arm/storage/usageoperations.go | 101 + .../azure-sdk-for-go/arm/storage/version.go | 43 + .../Azure/azure-sdk-for-go/storage/README.md | 5 + .../Azure/azure-sdk-for-go/storage/blob.go | 1596 +++++++++++ .../azure-sdk-for-go/storage/blob_test.go | 1584 +++++++++++ .../Azure/azure-sdk-for-go/storage/client.go | 552 ++++ .../azure-sdk-for-go/storage/client_test.go | 231 ++ .../Azure/azure-sdk-for-go/storage/file.go | 878 ++++++ .../azure-sdk-for-go/storage/file_test.go | 555 ++++ .../Azure/azure-sdk-for-go/storage/queue.go | 344 +++ .../azure-sdk-for-go/storage/queue_test.go | 142 + .../Azure/azure-sdk-for-go/storage/table.go | 129 + .../storage/table_entities.go | 357 +++ .../azure-sdk-for-go/storage/table_test.go | 287 ++ .../Azure/azure-sdk-for-go/storage/util.go | 85 + .../azure-sdk-for-go/storage/util_test.go | 86 + .../github.com/Azure/go-ansiterm/LICENSE | 21 + .../github.com/Azure/go-ansiterm/README.md | 9 + .../github.com/Azure/go-ansiterm/constants.go | 188 ++ .../github.com/Azure/go-ansiterm/context.go | 7 + .../Azure/go-ansiterm/csi_entry_state.go | 49 + .../Azure/go-ansiterm/csi_param_state.go | 38 + .../go-ansiterm/escape_intermediate_state.go | 36 + .../Azure/go-ansiterm/escape_state.go | 47 + .../Azure/go-ansiterm/event_handler.go | 90 + .../Azure/go-ansiterm/ground_state.go | 24 + .../Azure/go-ansiterm/osc_string_state.go | 31 + .../github.com/Azure/go-ansiterm/parser.go | 137 + .../go-ansiterm/parser_action_helpers.go | 103 + .../Azure/go-ansiterm/parser_actions.go | 122 + .../Azure/go-ansiterm/parser_test.go | 141 + .../go-ansiterm/parser_test_helpers_test.go | 114 + .../go-ansiterm/parser_test_utilities_test.go | 66 + .../github.com/Azure/go-ansiterm/states.go | 71 + .../go-ansiterm/test_event_handler_test.go | 173 ++ .../github.com/Azure/go-ansiterm/utilities.go | 21 + 
.../Azure/go-ansiterm/winterm/ansi.go | 182 ++ .../Azure/go-ansiterm/winterm/api.go | 329 +++ .../go-ansiterm/winterm/attr_translation.go | 102 + .../go-ansiterm/winterm/cursor_helpers.go | 101 + .../go-ansiterm/winterm/erase_helpers.go | 86 + .../go-ansiterm/winterm/scroll_helper.go | 118 + .../Azure/go-ansiterm/winterm/utilities.go | 9 + .../go-ansiterm/winterm/win_event_handler.go | 725 +++++ .../github.com/Azure/go-autorest/LICENSE | 191 ++ .../Azure/go-autorest/autorest/autorest.go | 114 + .../go-autorest/autorest/autorest_test.go | 126 + .../Azure/go-autorest/autorest/azure/async.go | 307 +++ .../go-autorest/autorest/azure/async_test.go | 1115 ++++++++ .../Azure/go-autorest/autorest/azure/azure.go | 180 ++ .../go-autorest/autorest/azure/azure_test.go | 431 +++ .../go-autorest/autorest/azure/config.go | 13 + .../go-autorest/autorest/azure/devicetoken.go | 193 ++ .../autorest/azure/devicetoken_test.go | 301 ++ .../autorest/azure/environments.go | 162 ++ .../autorest/azure/environments_test.go | 232 ++ .../go-autorest/autorest/azure/persist.go | 59 + .../autorest/azure/persist_test.go | 157 ++ .../Azure/go-autorest/autorest/azure/token.go | 363 +++ .../go-autorest/autorest/azure/token_test.go | 512 ++++ .../Azure/go-autorest/autorest/client.go | 212 ++ .../Azure/go-autorest/autorest/client_test.go | 315 +++ .../Azure/go-autorest/autorest/date/date.go | 82 + .../go-autorest/autorest/date/date_test.go | 223 ++ .../Azure/go-autorest/autorest/date/time.go | 89 + .../go-autorest/autorest/date/time_test.go | 263 ++ .../go-autorest/autorest/date/timerfc1123.go | 86 + .../autorest/date/timerfc1123_test.go | 212 ++ .../go-autorest/autorest/date/utility.go | 11 + .../Azure/go-autorest/autorest/error.go | 80 + .../Azure/go-autorest/autorest/error_test.go | 188 ++ .../Azure/go-autorest/autorest/preparer.go | 433 +++ .../go-autorest/autorest/preparer_test.go | 718 +++++ .../Azure/go-autorest/autorest/responder.go | 215 ++ .../go-autorest/autorest/responder_test.go | 591 ++++ 
.../Azure/go-autorest/autorest/sender.go | 269 ++ .../Azure/go-autorest/autorest/sender_test.go | 734 +++++ .../Azure/go-autorest/autorest/to/convert.go | 133 + .../go-autorest/autorest/to/convert_test.go | 220 ++ .../Azure/go-autorest/autorest/utility.go | 178 ++ .../go-autorest/autorest/utility_test.go | 368 +++ .../autorest/validation/validation.go | 373 +++ .../autorest/validation/validation_test.go | 2417 +++++++++++++++++ .../Azure/go-autorest/autorest/version.go | 18 + .../go-autorest/autorest/version_test.go | 13 + 127 files changed, 41192 insertions(+) create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go create mode 100644 
vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go create mode 100644 
vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go create mode 100644 
vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go 
create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/LICENSE create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/README.md create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/constants.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/context.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_param_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/event_handler.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/ground_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/osc_string_state.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_actions.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/states.go create mode 
100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/utilities.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/api.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/LICENSE create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go create mode 100644 
vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/config.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 
vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go create mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version.go create 
mode 100644 vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version_test.go diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 000000000000..af39a91e7033 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go new file mode 100644 index 000000000000..34a4d2df8269 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -0,0 +1,366 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// AvailabilitySetsClient is the the Compute Management Client. +type AvailabilitySetsClient struct { + ManagementClient +} + +// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient +// client. +func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { + return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAvailabilitySetsClientWithBaseURI creates an instance of the +// AvailabilitySetsClient client. +func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { + return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update an availability set. +// +// resourceGroupName is the name of the resource group. name is the name of +// the availability set. parameters is parameters supplied to the Create +// Availability Set operation. 
+func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.AvailabilitySetProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AvailabilitySetProperties.Statuses", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. 
+func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about an availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) { + req, err := client.GetPreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all availability sets in a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSizes lists all available virtual machine sizes that can be +// used to create a new virtual machine in an existing availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request") + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. 
+func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. 
+func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go new file mode 100644 index 000000000000..e8f3fb3e6d3d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -0,0 +1,58 @@ +// Package compute implements the Azure ARM Compute service API version +// 2016-03-30. +// +// The Compute Management Client. +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Compute + APIVersion = "2016-03-30" + + // DefaultBaseURI is the default URI used for the service Compute + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Compute. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go new file mode 100644 index 000000000000..13dbe637c4a0 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go @@ -0,0 +1,1180 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// CachingTypes enumerates the values for caching types. +type CachingTypes string + +const ( + // None specifies the none state for caching types. + None CachingTypes = "None" + // ReadOnly specifies the read only state for caching types. + ReadOnly CachingTypes = "ReadOnly" + // ReadWrite specifies the read write state for caching types. + ReadWrite CachingTypes = "ReadWrite" +) + +// ComponentNames enumerates the values for component names. +type ComponentNames string + +const ( + // MicrosoftWindowsShellSetup specifies the microsoft windows shell setup + // state for component names. + MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" +) + +// DiskCreateOptionTypes enumerates the values for disk create option types. +type DiskCreateOptionTypes string + +const ( + // Attach specifies the attach state for disk create option types. + Attach DiskCreateOptionTypes = "attach" + // Empty specifies the empty state for disk create option types. + Empty DiskCreateOptionTypes = "empty" + // FromImage specifies the from image state for disk create option types. + FromImage DiskCreateOptionTypes = "fromImage" +) + +// InstanceViewTypes enumerates the values for instance view types. +type InstanceViewTypes string + +const ( + // InstanceView specifies the instance view state for instance view types. + InstanceView InstanceViewTypes = "instanceView" +) + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux specifies the linux state for operating system types. 
+ Linux OperatingSystemTypes = "Linux" + // Windows specifies the windows state for operating system types. + Windows OperatingSystemTypes = "Windows" +) + +// PassNames enumerates the values for pass names. +type PassNames string + +const ( + // OobeSystem specifies the oobe system state for pass names. + OobeSystem PassNames = "oobeSystem" +) + +// ProtocolTypes enumerates the values for protocol types. +type ProtocolTypes string + +const ( + // HTTP specifies the http state for protocol types. + HTTP ProtocolTypes = "Http" + // HTTPS specifies the https state for protocol types. + HTTPS ProtocolTypes = "Https" +) + +// SettingNames enumerates the values for setting names. +type SettingNames string + +const ( + // AutoLogon specifies the auto logon state for setting names. + AutoLogon SettingNames = "AutoLogon" + // FirstLogonCommands specifies the first logon commands state for setting + // names. + FirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// StatusLevelTypes enumerates the values for status level types. +type StatusLevelTypes string + +const ( + // Error specifies the error state for status level types. + Error StatusLevelTypes = "Error" + // Info specifies the info state for status level types. + Info StatusLevelTypes = "Info" + // Warning specifies the warning state for status level types. + Warning StatusLevelTypes = "Warning" +) + +// UpgradeMode enumerates the values for upgrade mode. +type UpgradeMode string + +const ( + // Automatic specifies the automatic state for upgrade mode. + Automatic UpgradeMode = "Automatic" + // Manual specifies the manual state for upgrade mode. + Manual UpgradeMode = "Manual" +) + +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual +// machine scale set sku scale type. 
+type VirtualMachineScaleSetSkuScaleType string + +const ( + // VirtualMachineScaleSetSkuScaleTypeAutomatic specifies the virtual + // machine scale set sku scale type automatic state for virtual machine + // scale set sku scale type. + VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" + // VirtualMachineScaleSetSkuScaleTypeNone specifies the virtual machine + // scale set sku scale type none state for virtual machine scale set sku + // scale type. + VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" +) + +// VirtualMachineSizeTypes enumerates the values for virtual machine size +// types. +type VirtualMachineSizeTypes string + +const ( + // BasicA0 specifies the basic a0 state for virtual machine size types. + BasicA0 VirtualMachineSizeTypes = "Basic_A0" + // BasicA1 specifies the basic a1 state for virtual machine size types. + BasicA1 VirtualMachineSizeTypes = "Basic_A1" + // BasicA2 specifies the basic a2 state for virtual machine size types. + BasicA2 VirtualMachineSizeTypes = "Basic_A2" + // BasicA3 specifies the basic a3 state for virtual machine size types. + BasicA3 VirtualMachineSizeTypes = "Basic_A3" + // BasicA4 specifies the basic a4 state for virtual machine size types. + BasicA4 VirtualMachineSizeTypes = "Basic_A4" + // StandardA0 specifies the standard a0 state for virtual machine size + // types. + StandardA0 VirtualMachineSizeTypes = "Standard_A0" + // StandardA1 specifies the standard a1 state for virtual machine size + // types. + StandardA1 VirtualMachineSizeTypes = "Standard_A1" + // StandardA10 specifies the standard a10 state for virtual machine size + // types. + StandardA10 VirtualMachineSizeTypes = "Standard_A10" + // StandardA11 specifies the standard a11 state for virtual machine size + // types. + StandardA11 VirtualMachineSizeTypes = "Standard_A11" + // StandardA2 specifies the standard a2 state for virtual machine size + // types. 
+ StandardA2 VirtualMachineSizeTypes = "Standard_A2" + // StandardA3 specifies the standard a3 state for virtual machine size + // types. + StandardA3 VirtualMachineSizeTypes = "Standard_A3" + // StandardA4 specifies the standard a4 state for virtual machine size + // types. + StandardA4 VirtualMachineSizeTypes = "Standard_A4" + // StandardA5 specifies the standard a5 state for virtual machine size + // types. + StandardA5 VirtualMachineSizeTypes = "Standard_A5" + // StandardA6 specifies the standard a6 state for virtual machine size + // types. + StandardA6 VirtualMachineSizeTypes = "Standard_A6" + // StandardA7 specifies the standard a7 state for virtual machine size + // types. + StandardA7 VirtualMachineSizeTypes = "Standard_A7" + // StandardA8 specifies the standard a8 state for virtual machine size + // types. + StandardA8 VirtualMachineSizeTypes = "Standard_A8" + // StandardA9 specifies the standard a9 state for virtual machine size + // types. + StandardA9 VirtualMachineSizeTypes = "Standard_A9" + // StandardD1 specifies the standard d1 state for virtual machine size + // types. + StandardD1 VirtualMachineSizeTypes = "Standard_D1" + // StandardD11 specifies the standard d11 state for virtual machine size + // types. + StandardD11 VirtualMachineSizeTypes = "Standard_D11" + // StandardD11V2 specifies the standard d11v2 state for virtual machine + // size types. + StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // StandardD12 specifies the standard d12 state for virtual machine size + // types. + StandardD12 VirtualMachineSizeTypes = "Standard_D12" + // StandardD12V2 specifies the standard d12v2 state for virtual machine + // size types. + StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // StandardD13 specifies the standard d13 state for virtual machine size + // types. + StandardD13 VirtualMachineSizeTypes = "Standard_D13" + // StandardD13V2 specifies the standard d13v2 state for virtual machine + // size types. 
+ StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // StandardD14 specifies the standard d14 state for virtual machine size + // types. + StandardD14 VirtualMachineSizeTypes = "Standard_D14" + // StandardD14V2 specifies the standard d14v2 state for virtual machine + // size types. + StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // StandardD15V2 specifies the standard d15v2 state for virtual machine + // size types. + StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + // StandardD1V2 specifies the standard d1v2 state for virtual machine size + // types. + StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // StandardD2 specifies the standard d2 state for virtual machine size + // types. + StandardD2 VirtualMachineSizeTypes = "Standard_D2" + // StandardD2V2 specifies the standard d2v2 state for virtual machine size + // types. + StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // StandardD3 specifies the standard d3 state for virtual machine size + // types. + StandardD3 VirtualMachineSizeTypes = "Standard_D3" + // StandardD3V2 specifies the standard d3v2 state for virtual machine size + // types. + StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // StandardD4 specifies the standard d4 state for virtual machine size + // types. + StandardD4 VirtualMachineSizeTypes = "Standard_D4" + // StandardD4V2 specifies the standard d4v2 state for virtual machine size + // types. + StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // StandardD5V2 specifies the standard d5v2 state for virtual machine size + // types. + StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // StandardDS1 specifies the standard ds1 state for virtual machine size + // types. + StandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // StandardDS11 specifies the standard ds11 state for virtual machine size + // types. 
+ StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // StandardDS11V2 specifies the standard ds11v2 state for virtual machine + // size types. + StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + // StandardDS12 specifies the standard ds12 state for virtual machine size + // types. + StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // StandardDS12V2 specifies the standard ds12v2 state for virtual machine + // size types. + StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + // StandardDS13 specifies the standard ds13 state for virtual machine size + // types. + StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // StandardDS13V2 specifies the standard ds13v2 state for virtual machine + // size types. + StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + // StandardDS14 specifies the standard ds14 state for virtual machine size + // types. + StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // StandardDS14V2 specifies the standard ds14v2 state for virtual machine + // size types. + StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // StandardDS15V2 specifies the standard ds15v2 state for virtual machine + // size types. + StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // StandardDS1V2 specifies the standard ds1v2 state for virtual machine + // size types. + StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + // StandardDS2 specifies the standard ds2 state for virtual machine size + // types. + StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // StandardDS2V2 specifies the standard ds2v2 state for virtual machine + // size types. + StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + // StandardDS3 specifies the standard ds3 state for virtual machine size + // types. + StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // StandardDS3V2 specifies the standard ds3v2 state for virtual machine + // size types. 
+ StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + // StandardDS4 specifies the standard ds4 state for virtual machine size + // types. + StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // StandardDS4V2 specifies the standard ds4v2 state for virtual machine + // size types. + StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // StandardDS5V2 specifies the standard ds5v2 state for virtual machine + // size types. + StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + // StandardG1 specifies the standard g1 state for virtual machine size + // types. + StandardG1 VirtualMachineSizeTypes = "Standard_G1" + // StandardG2 specifies the standard g2 state for virtual machine size + // types. + StandardG2 VirtualMachineSizeTypes = "Standard_G2" + // StandardG3 specifies the standard g3 state for virtual machine size + // types. + StandardG3 VirtualMachineSizeTypes = "Standard_G3" + // StandardG4 specifies the standard g4 state for virtual machine size + // types. + StandardG4 VirtualMachineSizeTypes = "Standard_G4" + // StandardG5 specifies the standard g5 state for virtual machine size + // types. + StandardG5 VirtualMachineSizeTypes = "Standard_G5" + // StandardGS1 specifies the standard gs1 state for virtual machine size + // types. + StandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // StandardGS2 specifies the standard gs2 state for virtual machine size + // types. + StandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // StandardGS3 specifies the standard gs3 state for virtual machine size + // types. + StandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // StandardGS4 specifies the standard gs4 state for virtual machine size + // types. + StandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // StandardGS5 specifies the standard gs5 state for virtual machine size + // types. 
+ StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" +) + +// AdditionalUnattendContent is additional XML formatted information that can +// be included in the Unattend.xml file, which is used by Windows Setup. +// Contents are defined by setting name, component name, and the pass in +// which the content is a applied. +type AdditionalUnattendContent struct { + PassName PassNames `json:"passName,omitempty"` + ComponentName ComponentNames `json:"componentName,omitempty"` + SettingName SettingNames `json:"settingName,omitempty"` + Content *string `json:"content,omitempty"` +} + +// APIEntityReference is the API entity reference. +type APIEntityReference struct { + ID *string `json:"id,omitempty"` +} + +// APIError is api error. +type APIError struct { + Details *[]APIErrorBase `json:"details,omitempty"` + Innererror *InnerError `json:"innererror,omitempty"` + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// APIErrorBase is api error base. +type APIErrorBase struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// AvailabilitySet is create or update availability set parameters. +type AvailabilitySet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *AvailabilitySetProperties `json:"properties,omitempty"` +} + +// AvailabilitySetListResult is the List Availability Set operation response. +type AvailabilitySetListResult struct { + autorest.Response `json:"-"` + Value *[]AvailabilitySet `json:"value,omitempty"` +} + +// AvailabilitySetProperties is the instance view of a resource. 
+type AvailabilitySetProperties struct { + PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// BootDiagnostics is describes Boot Diagnostics. +type BootDiagnostics struct { + Enabled *bool `json:"enabled,omitempty"` + StorageURI *string `json:"storageUri,omitempty"` +} + +// BootDiagnosticsInstanceView is the instance view of a virtual machine boot +// diagnostics. +type BootDiagnosticsInstanceView struct { + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` +} + +// DataDisk is describes a data disk. +type DataDisk struct { + Lun *int32 `json:"lun,omitempty"` + Name *string `json:"name,omitempty"` + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` +} + +// DataDiskImage is contains the data disk images information. +type DataDiskImage struct { + Lun *int32 `json:"lun,omitempty"` +} + +// DiagnosticsProfile is describes a diagnostics profile. +type DiagnosticsProfile struct { + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// DiskEncryptionSettings is describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskInstanceView is the instance view of the disk. 
+type DiskInstanceView struct { + Name *string `json:"name,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// HardwareProfile is describes a hardware profile. +type HardwareProfile struct { + VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` +} + +// ImageReference is the image reference. +type ImageReference struct { + Publisher *string `json:"publisher,omitempty"` + Offer *string `json:"offer,omitempty"` + Sku *string `json:"sku,omitempty"` + Version *string `json:"version,omitempty"` +} + +// InnerError is inner error details. +type InnerError struct { + Exceptiontype *string `json:"exceptiontype,omitempty"` + Errordetail *string `json:"errordetail,omitempty"` +} + +// InstanceViewStatus is instance view status. +type InstanceViewStatus struct { + Code *string `json:"code,omitempty"` + Level StatusLevelTypes `json:"level,omitempty"` + DisplayStatus *string `json:"displayStatus,omitempty"` + Message *string `json:"message,omitempty"` + Time *date.Time `json:"time,omitempty"` +} + +// KeyVaultKeyReference is describes a reference to Key Vault Key +type KeyVaultKeyReference struct { + KeyURL *string `json:"keyUrl,omitempty"` + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// KeyVaultSecretReference is describes a reference to Key Vault Secret +type KeyVaultSecretReference struct { + SecretURL *string `json:"secretUrl,omitempty"` + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// LinuxConfiguration is describes Windows configuration of the OS Profile. +type LinuxConfiguration struct { + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + SSH *SSHConfiguration `json:"ssh,omitempty"` +} + +// ListUsagesResult is the List Usages operation response. 
+type ListUsagesResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListUsagesResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListUsagesResult) ListUsagesResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ListVirtualMachineExtensionImage is +type ListVirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` +} + +// ListVirtualMachineImageResource is +type ListVirtualMachineImageResource struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineImageResource `json:"value,omitempty"` +} + +// LongRunningOperationProperties is compute-specific operation properties, +// including output +type LongRunningOperationProperties struct { + Output *map[string]interface{} `json:"output,omitempty"` +} + +// NetworkInterfaceReference is describes a network interface reference. +type NetworkInterfaceReference struct { + ID *string `json:"id,omitempty"` + *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` +} + +// NetworkInterfaceReferenceProperties is describes a network interface +// reference properties. +type NetworkInterfaceReferenceProperties struct { + Primary *bool `json:"primary,omitempty"` +} + +// NetworkProfile is describes a network profile. +type NetworkProfile struct { + NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` +} + +// OSDisk is describes an Operating System disk. 
+type OSDisk struct { + OsType OperatingSystemTypes `json:"osType,omitempty"` + EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + Name *string `json:"name,omitempty"` + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` +} + +// OSDiskImage is contains the os disk image information. +type OSDiskImage struct { + OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` +} + +// OSProfile is describes an OS profile. +type OSProfile struct { + ComputerName *string `json:"computerName,omitempty"` + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` + CustomData *string `json:"customData,omitempty"` + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// Plan is plan for the resource. +type Plan struct { + Name *string `json:"name,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Product *string `json:"product,omitempty"` + PromotionCode *string `json:"promotionCode,omitempty"` +} + +// PurchasePlan is used for establishing the purchase context of any 3rd Party +// artifact through MarketPlace. +type PurchasePlan struct { + Publisher *string `json:"publisher,omitempty"` + Name *string `json:"name,omitempty"` + Product *string `json:"product,omitempty"` +} + +// Resource is the resource model definition. 
+type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Sku is describes a virtual machine scale set sku. +type Sku struct { + Name *string `json:"name,omitempty"` + Tier *string `json:"tier,omitempty"` + Capacity *int64 `json:"capacity,omitempty"` +} + +// SSHConfiguration is sSH configuration for Linux based VMs running on Azure +type SSHConfiguration struct { + PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` +} + +// SSHPublicKey is contains information about SSH certificate public key and +// the path on the Linux VM where the public key is placed. +type SSHPublicKey struct { + Path *string `json:"path,omitempty"` + KeyData *string `json:"keyData,omitempty"` +} + +// StorageProfile is describes a storage profile. +type StorageProfile struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *OSDisk `json:"osDisk,omitempty"` + DataDisks *[]DataDisk `json:"dataDisks,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// UpgradePolicy is describes an upgrade policy - automatic or manual. +type UpgradePolicy struct { + Mode UpgradeMode `json:"mode,omitempty"` +} + +// Usage is describes Compute Resource Usage. +type Usage struct { + Unit *string `json:"unit,omitempty"` + CurrentValue *int32 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` + Name *UsageName `json:"name,omitempty"` +} + +// UsageName is the Usage Names. +type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VaultCertificate is describes a single certificate reference in a Key +// Vault, and where the certificate should reside on the VM. 
+type VaultCertificate struct { + CertificateURL *string `json:"certificateUrl,omitempty"` + CertificateStore *string `json:"certificateStore,omitempty"` +} + +// VaultSecretGroup is describes a set of certificates which are all in the +// same Key Vault. +type VaultSecretGroup struct { + SourceVault *SubResource `json:"sourceVault,omitempty"` + VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk is describes the uri of a disk. +type VirtualHardDisk struct { + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine is describes a Virtual Machine. +type VirtualMachine struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineProperties `json:"properties,omitempty"` + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineAgentInstanceView is the instance view of the VM Agent +// running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` + ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineCaptureParameters is capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + VhdPrefix *string `json:"vhdPrefix,omitempty"` + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` +} + +// VirtualMachineCaptureResult is resource Id. 
+type VirtualMachineCaptureResult struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *VirtualMachineCaptureResultProperties `json:"properties,omitempty"` +} + +// VirtualMachineCaptureResultProperties is compute-specific operation +// properties, including output +type VirtualMachineCaptureResultProperties struct { + Output *map[string]interface{} `json:"output,omitempty"` +} + +// VirtualMachineExtension is describes a Virtual Machine Extension. +type VirtualMachineExtension struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionHandlerInstanceView is the instance view of a +// virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineExtensionImage is describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionImageProperties is describes the properties of a +// Virtual Machine Extension Image. 
+type VirtualMachineExtensionImageProperties struct { + OperatingSystem *string `json:"operatingSystem,omitempty"` + ComputeRole *string `json:"computeRole,omitempty"` + HandlerSchema *string `json:"handlerSchema,omitempty"` + VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` + SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` +} + +// VirtualMachineExtensionInstanceView is the instance view of a virtual +// machine extension. +type VirtualMachineExtensionInstanceView struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineExtensionProperties is describes the properties of a Virtual +// Machine Extension. +type VirtualMachineExtensionProperties struct { + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + Publisher *string `json:"publisher,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + Settings *map[string]interface{} `json:"settings,omitempty"` + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineImage is describes a Virtual Machine Image. +type VirtualMachineImage struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineImageProperties `json:"properties,omitempty"` +} + +// VirtualMachineImageProperties is describes the properties of a Virtual +// Machine Image. 
+type VirtualMachineImageProperties struct { + Plan *PurchasePlan `json:"plan,omitempty"` + OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` +} + +// VirtualMachineImageResource is virtual machine image resource information. +type VirtualMachineImageResource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineInstanceView is the instance view of a virtual machine. +type VirtualMachineInstanceView struct { + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + Disks *[]DiskInstanceView `json:"disks,omitempty"` + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineListResult is the List Virtual Machine operation response. +type VirtualMachineListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachine `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineListResult) VirtualMachineListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineProperties is describes the properties of a Virtual Machine. 
+type VirtualMachineProperties struct { + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + OsProfile *OSProfile `json:"osProfile,omitempty"` + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` + VMID *string `json:"vmId,omitempty"` +} + +// VirtualMachineScaleSet is describes a Virtual Machine Scale Set. +type VirtualMachineScaleSet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + *VirtualMachineScaleSetProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetExtension is describes a Virtual Machine Scale Set +// Extension. +type VirtualMachineScaleSetExtension struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetExtensionProfile is describes a virtual machine scale +// set extension profile. +type VirtualMachineScaleSetExtensionProfile struct { + Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` +} + +// VirtualMachineScaleSetExtensionProperties is describes the properties of a +// Virtual Machine Scale Set Extension. 
+type VirtualMachineScaleSetExtensionProperties struct { + Publisher *string `json:"publisher,omitempty"` + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + Settings *map[string]interface{} `json:"settings,omitempty"` + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineScaleSetInstanceView is the instance view of a virtual +// machine scale set. +type VirtualMachineScaleSetInstanceView struct { + autorest.Response `json:"-"` + VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` + Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetInstanceViewStatusesSummary is instance view statuses +// summary for virtual machines of a virtual machine scale set. +type VirtualMachineScaleSetInstanceViewStatusesSummary struct { + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetIPConfiguration is describes a virtual machine scale +// set network profile's IP configuration. +type VirtualMachineScaleSetIPConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetIPConfigurationProperties is describes a virtual +// machine scale set network profile's IP configuration properties. 
+type VirtualMachineScaleSetIPConfigurationProperties struct { + Subnet *APIEntityReference `json:"subnet,omitempty"` + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetListResult is the List Virtual Machine operation +// response. +type VirtualMachineScaleSetListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetListSkusResult is the Virtual Machine Scale Set List +// Skus operation response. +type VirtualMachineScaleSetListSkusResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualMachineScaleSetListSkusResult) VirtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetListWithLinkResult is the List Virtual Machine +// operation response. +type VirtualMachineScaleSetListWithLinkResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetListWithLinkResult) VirtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetNetworkConfiguration is describes a virtual machine +// scale set network profile's network configurations. +type VirtualMachineScaleSetNetworkConfiguration struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationProperties is describes a virtual +// machine scale set network profile's IP configuration. +type VirtualMachineScaleSetNetworkConfigurationProperties struct { + Primary *bool `json:"primary,omitempty"` + IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` +} + +// VirtualMachineScaleSetNetworkProfile is describes a virtual machine scale +// set network profile. 
+type VirtualMachineScaleSetNetworkProfile struct { + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetOSDisk is describes a virtual machine scale set +// operating system disk. +type VirtualMachineScaleSetOSDisk struct { + Name *string `json:"name,omitempty"` + Caching CachingTypes `json:"caching,omitempty"` + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + Image *VirtualHardDisk `json:"image,omitempty"` + VhdContainers *[]string `json:"vhdContainers,omitempty"` +} + +// VirtualMachineScaleSetOSProfile is describes a virtual machine scale set OS +// profile. +type VirtualMachineScaleSetOSProfile struct { + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` + AdminUsername *string `json:"adminUsername,omitempty"` + AdminPassword *string `json:"adminPassword,omitempty"` + CustomData *string `json:"customData,omitempty"` + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetProperties is describes the properties of a Virtual +// Machine Scale Set. +type VirtualMachineScaleSetProperties struct { + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + Overprovision *bool `json:"overprovision,omitempty"` +} + +// VirtualMachineScaleSetSku is describes an available virtual machine scale +// set sku. 
+type VirtualMachineScaleSetSku struct { + ResourceType *string `json:"resourceType,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"` +} + +// VirtualMachineScaleSetSkuCapacity is describes scaling information of a sku. +type VirtualMachineScaleSetSkuCapacity struct { + Minimum *int64 `json:"minimum,omitempty"` + Maximum *int64 `json:"maximum,omitempty"` + DefaultCapacity *int64 `json:"defaultCapacity,omitempty"` + ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` +} + +// VirtualMachineScaleSetStorageProfile is describes a virtual machine scale +// set storage profile. +type VirtualMachineScaleSetStorageProfile struct { + ImageReference *ImageReference `json:"imageReference,omitempty"` + OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` +} + +// VirtualMachineScaleSetVM is describes a virtual machine scale set virtual +// machine. +type VirtualMachineScaleSetVM struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + InstanceID *string `json:"instanceId,omitempty"` + Sku *Sku `json:"sku,omitempty"` + *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineScaleSetVMExtensionsSummary is extensions summary for virtual +// machines of a virtual machine scale set. +type VirtualMachineScaleSetVMExtensionsSummary struct { + Name *string `json:"name,omitempty"` + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceIDs is specifies a list of virtual machine +// instance IDs from the VM scale set. 
+type VirtualMachineScaleSetVMInstanceIDs struct { + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceRequiredIDs is specifies a list of virtual +// machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceRequiredIDs struct { + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceView is the instance view of a virtual +// machine scale set VM. +type VirtualMachineScaleSetVMInstanceView struct { + autorest.Response `json:"-"` + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + Disks *[]DiskInstanceView `json:"disks,omitempty"` + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetVMListResult is the List Virtual Machine Scale Set +// VMs operation response. +type VirtualMachineScaleSetVMListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineScaleSetVMListResult) VirtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineScaleSetVMProfile is describes a virtual machine scale set +// virtual machine profile. 
+type VirtualMachineScaleSetVMProfile struct { + OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` +} + +// VirtualMachineScaleSetVMProperties is describes the properties of a virtual +// machine scale set virtual machine. +type VirtualMachineScaleSetVMProperties struct { + LatestModelApplied *bool `json:"latestModelApplied,omitempty"` + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + OsProfile *OSProfile `json:"osProfile,omitempty"` + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineSize is describes the properties of a VM size. +type VirtualMachineSize struct { + Name *string `json:"name,omitempty"` + NumberOfCores *int32 `json:"numberOfCores,omitempty"` + OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` + ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` + MemoryInMB *int32 `json:"memoryInMB,omitempty"` + MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` +} + +// VirtualMachineSizeListResult is the List Virtual Machine operation response. 
+type VirtualMachineSizeListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineSize `json:"value,omitempty"` +} + +// VirtualMachineStatusCodeCount is the status code and count of the virtual +// machine scale set instance view status summary. +type VirtualMachineStatusCodeCount struct { + Code *string `json:"code,omitempty"` + Count *int32 `json:"count,omitempty"` +} + +// WindowsConfiguration is describes Windows Configuration of the OS Profile. +type WindowsConfiguration struct { + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + WinRM *WinRMConfiguration `json:"winRM,omitempty"` +} + +// WinRMConfiguration is describes Windows Remote Management configuration of +// the VM +type WinRMConfiguration struct { + Listeners *[]WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener is describes Protocol and thumbprint of Windows Remote +// Management listener +type WinRMListener struct { + Protocol ProtocolTypes `json:"protocol,omitempty"` + CertificateURL *string `json:"certificateUrl,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go new file mode 100644 index 000000000000..5fb5bd6f555e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go @@ -0,0 +1,136 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// UsageOperationsClient is the the Compute Management Client. +type UsageOperationsClient struct { + ManagementClient +} + +// NewUsageOperationsClient creates an instance of the UsageOperationsClient +// client. +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets, for the specified location, the current compute resource usage +// information as well as the limits for compute resources under the +// subscription. +// +// location is the location for which resource usage is queried. 
+func (client UsageOperationsClient) List(location string) (result ListUsagesResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.UsageOperationsClient", "List") + } + + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client UsageOperationsClient) ListNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { + req, err := lastResults.ListUsagesResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageOperationsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go new file mode 100644 index 000000000000..3c4783ed6fa4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -0,0 +1,43 @@ +package compute + +// Copyright (c) Microsoft and contributors. 
All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "compute", "2016-03-30") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go new file mode 100644 index 000000000000..089ebe10e820 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -0,0 +1,238 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineExtensionImagesClient is the the Compute Management Client. +type VirtualMachineExtensionImagesClient struct { + ManagementClient +} + +// NewVirtualMachineExtensionImagesClient creates an instance of the +// VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { + return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of +// the VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine extension image. 
+// +func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) { + req, err := client.GetPreparer(location, publisherName, typeParameter, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, publisherName string, typeParameter string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), + "version": autorest.Encode("path", version), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListTypes gets a list of virtual machine extension image types. +// +func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) { + req, err := client.ListTypesPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request") + } + + resp, err := client.ListTypesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request") + } + + result, err = client.ListTypesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure responding to request") + } + + return +} + +// ListTypesPreparer prepares the ListTypes request. 
+func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListTypesSender sends the ListTypes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListTypesResponder handles the response to the ListTypes request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVersions gets a list of virtual machine extension image versions. +// +// filter is the filter to apply on the operation. 
+func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) { + req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request") + } + + resp, err := client.ListVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request") + } + + result, err = client.ListVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure responding to request") + } + + return +} + +// ListVersionsPreparer prepares the ListVersions request. 
+func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVersionsSender sends the ListVersions request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVersionsResponder handles the response to the ListVersions request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go new file mode 100644 index 000000000000..d94a2b9683a4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -0,0 +1,261 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineExtensionsClient is the the Compute Management Client. 
+type VirtualMachineExtensionsClient struct { + ManagementClient +} + +// NewVirtualMachineExtensionsClient creates an instance of the +// VirtualMachineExtensionsClient client. +func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient { + return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineExtensionsClient client. +func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { + return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update the extension. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine where the extension should be create or updated. +// vmExtensionName is the name of the virtual machine extension. +// extensionParameters is parameters supplied to the Create Virtual Machine +// Extension operation. 
+func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: extensionParameters, + Constraints: []validation.Constraint{{Target: "extensionParameters.VirtualMachineExtensionProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "extensionParameters.VirtualMachineExtensionProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete the extension. This method may poll for +// completion. 
Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine where the extension should be deleted. vmExtensionName +// is the name of the virtual machine extension. +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the extension. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine containing the extension. vmExtensionName is the name +// of the virtual machine extension. 
expand is the expand expression to apply +// on the operation. +func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, err error) { + req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", vmExtensionName), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) 
+} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go new file mode 100644 index 000000000000..db1877789378 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -0,0 +1,376 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineImagesClient is the the Compute Management Client. +type VirtualMachineImagesClient struct { + ManagementClient +} + +// NewVirtualMachineImagesClient creates an instance of the +// VirtualMachineImagesClient client. +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { + return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the +// VirtualMachineImagesClient client. +func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine image. +// +// location is the name of a supported Azure region. publisherName is a valid +// image publisher. offer is a valid image publisher offer. skus is a valid +// image SKU. version is a valid image SKU version. 
+func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) { + req, err := client.GetPreparer(location, publisherName, offer, skus, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineImagesClient) GetPreparer(location string, publisherName string, offer string, skus string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "version": autorest.Encode("path", version), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all virtual machine image versions for the specified +// location, publisher, offer, and SKU. +// +// location is the name of a supported Azure region. publisherName is a valid +// image publisher. offer is a valid image publisher offer. skus is a valid +// image SKU. filter is the filter to apply on the operation. +func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListOffers gets a list of virtual machine image offers for the specified +// location and publisher. +// +// location is the name of a supported Azure region. publisherName is a valid +// image publisher. +func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListOffersPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request") + } + + resp, err := client.ListOffersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request") + } + + result, err = client.ListOffersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure responding to request") + } + + return +} + +// ListOffersPreparer prepares the ListOffers request. 
+func (client VirtualMachineImagesClient) ListOffersPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListOffersSender sends the ListOffers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListOffersResponder handles the response to the ListOffers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListPublishers gets a list of virtual machine image publishers for the +// specified Azure location. +// +// location is the name of a supported Azure region. 
+func (client VirtualMachineImagesClient) ListPublishers(location string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListPublishersPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request") + } + + resp, err := client.ListPublishersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request") + } + + result, err = client.ListPublishersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure responding to request") + } + + return +} + +// ListPublishersPreparer prepares the ListPublishers request. +func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListPublishersSender sends the ListPublishers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListPublishersResponder handles the response to the ListPublishers request. 
The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkus gets a list of virtual machine image SKUs for the specified +// location, publisher, and offer. +// +// location is the name of a supported Azure region. publisherName is a valid +// image publisher. offer is a valid image publisher offer. +func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListSkusPreparer(location, publisherName, offer) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. 
+func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publisherName string, offer string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go new file mode 100644 index 000000000000..626319737ce1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -0,0 +1,989 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachinesClient is the the Compute Management Client. 
+type VirtualMachinesClient struct { + ManagementClient +} + +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient +// client. +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { + return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachinesClientWithBaseURI creates an instance of the +// VirtualMachinesClient client. +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Capture captures the VM by copying virtual hard disks of the VM and outputs +// a template that can be used to create similar VMs. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Capture +// Virtual Machine operation. 
+func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VhdPrefix", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.DestinationContainerName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.OverwriteVhds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "Capture") + } + + req, err := client.CapturePreparer(resourceGroupName, vmName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request") + } + + resp, err := client.CaptureSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure sending request") + } + + result, err = client.CaptureResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", resp, "Failure responding to request") + } + + return +} + +// CapturePreparer prepares the Capture request. 
+func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CaptureSender sends the Capture request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CaptureResponder handles the response to the Capture request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate the operation to create or update a virtual machine. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. 
+// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Create +// Virtual Machine operation. +func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: 
"parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.Vhd", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.VirtualMachineProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.InstanceView", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.VMID", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + {Target: "parameters.Resources", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Deallocate shuts down the virtual machine and releases the compute +// resources. You are not billed for the compute resources that this virtual +// machine uses. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. 
The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", resp, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeallocateSender sends the Deallocate request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Generalize sets the state of the virtual machine to generalized. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
+func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, err error) { + req, err := client.GeneralizePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request") + } + + resp, err := client.GeneralizeSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request") + } + + result, err = client.GeneralizeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure responding to request") + } + + return +} + +// GeneralizePreparer prepares the Generalize request. +func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GeneralizeSender sends the Generalize request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GeneralizeResponder handles the response to the Generalize request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about the model view or the instance view of a +// virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. expand is the expand expression to apply on the +// operation. Possible values include: 'instanceView' +func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand InstanceViewTypes) (result VirtualMachine, err error) { + req, err := client.GetPreparer(resourceGroupName, vmName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand InstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all of the virtual machines in the specified resource group. Use +// the nextLink property in the response to get the next page of virtual +// machines. +// +// resourceGroupName is the name of the resource group. 
+func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.VirtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll lists all of the virtual machines in the specified subscription. +// Use the nextLink property in the response to get the next page of virtual +// machines. 
+func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.VirtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// ListAvailableSizes lists all available virtual machine sizes to which the +// specified virtual machine can be resized. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
+func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, err error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request") + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. +func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PowerOff the operation to power off (stop) a virtual machine. The virtual +// machine can be restarted with the same provisioned resources. You are +// still charged for this virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
+func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", resp, "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. +func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Redeploy the operation to redeploy a virtual machine. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Redeploy(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RedeployPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request") + } + + resp, err := client.RedeploySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure sending request") + } + + result, err = client.RedeployResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", resp, "Failure responding to request") + } + + return +} + +// RedeployPreparer prepares the Redeploy request. 
+func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RedeploySender sends the Redeploy request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RedeployResponder handles the response to the Redeploy request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart a virtual machine. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
+func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", resp, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start the operation to start a virtual machine. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. 
+func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go new file mode 100644 index 000000000000..648e9fa4aec5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -0,0 +1,1091 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineScaleSetsClient is the the Compute Management Client. 
+type VirtualMachineScaleSetsClient struct { + ManagementClient +} + +// NewVirtualMachineScaleSetsClient creates an instance of the +// VirtualMachineScaleSetsClient client. +func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient { + return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetsClient client. +func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { + return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a VM scale set. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. name is the name of +// the VM scale set to create or update. parameters is the scale set object. 
+func (client VirtualMachineScaleSetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.VirtualMachineProfile.StorageProfile.OsDisk.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}, + {Target: "parameters.VirtualMachineScaleSetProperties.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, 
"compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters VirtualMachineScaleSet, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Deallocate deallocates specific virtual machines in a VM scale set. Shuts +// down the virtual machines and releases the compute resources. You are not +// billed for the compute resources that this virtual machine scale set +// deallocates. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Deallocate(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", resp, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. 
+func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes a VM scale set. This method may poll for completion. 
Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. +func (client VirtualMachineScaleSetsClient) Delete(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteInstances deletes virtual machines in a VM scale set. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. 
vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) DeleteInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: vmInstanceIDs, + Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances") + } + + req, err := client.DeleteInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request") + } + + resp, err := client.DeleteInstancesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure sending request") + } + + result, err = client.DeleteInstancesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", resp, "Failure responding to request") + } + + return +} + +// DeleteInstancesPreparer prepares the DeleteInstances request. 
+func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters), + autorest.WithJSON(vmInstanceIDs), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteInstancesSender sends the DeleteInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get display information about a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. 
vmScaleSetName is the +// name of the VM scale set. +func (client VirtualMachineScaleSetsClient) Get(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSet, err error) { + req, err := client.GetPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView gets the status of a VM scale set instance. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. +func (client VirtualMachineScaleSetsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request") + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request") + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure responding to request") + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. 
+func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all VM scale sets under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client VirtualMachineScaleSetsClient) List(resourceGroupName string) (result VirtualMachineScaleSetListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListNextResults(lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) { + req, err := lastResults.VirtualMachineScaleSetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets a list of all VM Scale Sets in the subscription, regardless of +// the associated resource group. Use nextLink property in the response to +// get the next page of VM Scale Sets. Do this till nextLink is not null to +// fetch all the VM Scale Sets. 
+func (client VirtualMachineScaleSetsClient) ListAll() (result VirtualMachineScaleSetListWithLinkResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, err error) { + req, err := lastResults.VirtualMachineScaleSetListWithLinkResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// ListSkus gets a list of SKUs available for your VM scale set, including the +// minimum and maximum VM instances allowed for each SKU. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. 
+func (client VirtualMachineScaleSetsClient) ListSkus(resourceGroupName string, vmScaleSetName string) (result VirtualMachineScaleSetListSkusResult, err error) { + req, err := client.ListSkusPreparer(resourceGroupName, vmScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. +func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName string, vmScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkusNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetsClient) ListSkusNextResults(lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, err error) { + req, err := lastResults.VirtualMachineScaleSetListSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending next results request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to next results request") + } + + return +} + +// PowerOff power off (stop) one or more virtual machines in a VM scale set. +// Note that resources are still attached and you are getting charged for the +// resources. Instead, use deallocate to release resources and avoid charges. +// This method may poll for completion. 
Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) PowerOff(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", resp, "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. 
+func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage reimages (upgrade the operating system) one or more virtual +// machines in a VM scale set. This method may poll for completion. 
Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. +func (client VirtualMachineScaleSetsClient) Reimage(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure sending request") + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", resp, "Failure responding to request") + } + + return +} + +// ReimagePreparer prepares the Reimage request. 
+func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageSender sends the Reimage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart restarts one or more virtual machines in a VM scale set. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Restart(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", resp, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start starts one or more virtual machines in a VM scale set. This method +// may poll for completion. 
Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Start(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if vmInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(vmInstanceIDs)) + } + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// UpdateInstances upgrades one or more virtual machines to the latest SKU set +// in the VM scale set model. This method may poll for completion. 
Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. vmInstanceIDs is a list of virtual machine +// instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) UpdateInstances(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: vmInstanceIDs, + Constraints: []validation.Constraint{{Target: "vmInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances") + } + + req, err := client.UpdateInstancesPreparer(resourceGroupName, vmScaleSetName, vmInstanceIDs, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request") + } + + resp, err := client.UpdateInstancesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure sending request") + } + + result, err = client.UpdateInstancesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", resp, "Failure responding to request") + } + + return +} + +// UpdateInstancesPreparer prepares the UpdateInstances request. 
+func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters), + autorest.WithJSON(vmInstanceIDs), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateInstancesSender sends the UpdateInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateInstancesResponder handles the response to the UpdateInstances request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go new file mode 100644 index 000000000000..f0b309510e63 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -0,0 +1,688 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineScaleSetVMsClient is the the Compute Management Client. +type VirtualMachineScaleSetVMsClient struct { + ManagementClient +} + +// NewVirtualMachineScaleSetVMsClient creates an instance of the +// VirtualMachineScaleSetVMsClient client. 
+func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { + return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetVMsClient client. +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { + return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts +// down the virtual machine and releases the compute resources it uses. You +// are not billed for the compute resources of this virtual machine once it +// is deallocated. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) Deallocate(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", resp, "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeallocateSender sends the 
Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes a virtual machine from a VM scale set. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) Delete(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a virtual machine from a VM scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. +func (client VirtualMachineScaleSetVMsClient) Get(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { + req, err := client.GetPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView gets the status of a virtual machine from a VM scale set. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) GetInstanceView(resourceGroupName string, vmScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) { + req, err := client.GetInstanceViewPreparer(resourceGroupName, vmScaleSetName, instanceID) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request") + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request") + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure responding to request") + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. 
+func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGroupName string, vmScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all virtual machines in a VM scale sets. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the VM scale set. 
filter is the +// filter to apply to the operation. selectParameter is the list parameters. +// expand is the expand expression to apply to the operation. +func (client VirtualMachineScaleSetVMsClient) List(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetVMsClient) ListNextResults(lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, err error) { + req, err := lastResults.VirtualMachineScaleSetVMListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// PowerOff power off (stop) a virtual machine in a VM scale set. Note that +// resources are still attached and you are getting charged for the +// resources. Instead, use deallocate to release resources and avoid charges. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) PowerOff(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", resp, "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// PowerOffSender sends the PowerOff request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Reimage reimages (upgrade the operating system) a specific virtual machine +// in a VM scale set. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) Reimage(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ReimagePreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request") + } + + resp, err := client.ReimageSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure sending request") + } + + result, err = client.ReimageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", resp, "Failure responding to request") + } + + return +} + +// ReimagePreparer prepares the Reimage request. +func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ReimageSender sends the Reimage request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart restarts a virtual machine in a VM scale set. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) Restart(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.RestartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", resp, "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RestartSender sends the Restart request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start starts a virtual machine in a VM scale set. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. vmScaleSetName is the +// name of the VM scale set. instanceID is the instance ID of the virtual +// machine. 
+func (client VirtualMachineScaleSetVMsClient) Start(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, vmScaleSetName, instanceID, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName string, vmScaleSetName string, instanceID string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", vmScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go new file mode 100644 index 000000000000..507e9f157ec4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -0,0 +1,111 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineSizesClient is the the Compute Management Client. +type VirtualMachineSizesClient struct { + ManagementClient +} + +// NewVirtualMachineSizesClient creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { + return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all available virtual machine sizes for a subscription in a +// location. +// +// location is the location upon which virtual-machine-sizes is queried. 
+func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineSizesClient", "List") + } + + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go new file mode 100644 index 000000000000..2257c451fb11 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go @@ -0,0 +1,57 @@ +// Package containerregistry implements the Azure ARM Containerregistry +// service API version 2016-06-27-preview. +// +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Containerregistry + APIVersion = "2016-06-27-preview" + + // DefaultBaseURI is the default URI used for the service Containerregistry + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Containerregistry. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go new file mode 100644 index 000000000000..ecf1ca8f648e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/models.go @@ -0,0 +1,118 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// Registry is an object that represents a container registry. +type Registry struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *RegistryProperties `json:"properties,omitempty"` +} + +// RegistryCredentials is the result of a request to get the administrator +// login credentials for a container registry. +type RegistryCredentials struct { + autorest.Response `json:"-"` + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +// RegistryListResult is the result of a request to list container registries. +type RegistryListResult struct { + autorest.Response `json:"-"` + Value *[]Registry `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RegistryListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client RegistryListResult) RegistryListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RegistryNameCheckRequest is a request to check whether the container +// registry name is available. +type RegistryNameCheckRequest struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// RegistryNameStatus is the result of a request to check the availability of +// a container registry name. +type RegistryNameStatus struct { + autorest.Response `json:"-"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// RegistryProperties is the properties of a container registry. +type RegistryProperties struct { + LoginServer *string `json:"loginServer,omitempty"` + CreationDate *date.Time `json:"creationDate,omitempty"` + AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` + StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` +} + +// RegistryPropertiesUpdateParameters is the parameters for updating the +// properties of a container registry. +type RegistryPropertiesUpdateParameters struct { + AdminUserEnabled *bool `json:"adminUserEnabled,omitempty"` + StorageAccount *StorageAccountProperties `json:"storageAccount,omitempty"` +} + +// RegistryUpdateParameters is the parameters for updating a container +// registry. +type RegistryUpdateParameters struct { + Tags *map[string]*string `json:"tags,omitempty"` + *RegistryPropertiesUpdateParameters `json:"properties,omitempty"` +} + +// Resource is an Azure resource. 
+type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// StorageAccountProperties is the properties of a storage account for a +// container registry. +type StorageAccountProperties struct { + Name *string `json:"name,omitempty"` + AccessKey *string `json:"accessKey,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go new file mode 100644 index 000000000000..c658cccf580b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go @@ -0,0 +1,685 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// RegistriesClient is the client for the Registries methods of the +// Containerregistry service. +type RegistriesClient struct { + ManagementClient +} + +// NewRegistriesClient creates an instance of the RegistriesClient client. +func NewRegistriesClient(subscriptionID string) RegistriesClient { + return NewRegistriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRegistriesClientWithBaseURI creates an instance of the RegistriesClient +// client. +func NewRegistriesClientWithBaseURI(baseURI string, subscriptionID string) RegistriesClient { + return RegistriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks whether the container registry name is +// available for use. The name must contain only alphanumeric characters, be +// globally unique, and between 5 and 60 characters in length. +// +// registryNameCheckRequest is the object containing information for the +// availability request. 
+func (client RegistriesClient) CheckNameAvailability(registryNameCheckRequest RegistryNameCheckRequest) (result RegistryNameStatus, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registryNameCheckRequest, + Constraints: []validation.Constraint{{Target: "registryNameCheckRequest.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registryNameCheckRequest.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "CheckNameAvailability") + } + + req, err := client.CheckNameAvailabilityPreparer(registryNameCheckRequest) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client RegistriesClient) CheckNameAvailabilityPreparer(registryNameCheckRequest RegistryNameCheckRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerRegistry/checkNameAvailability", pathParameters), + autorest.WithJSON(registryNameCheckRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client RegistriesClient) CheckNameAvailabilityResponder(resp *http.Response) (result RegistryNameStatus, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates or updates a container registry with the specified +// parameters. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. +// registry is the parameters for creating or updating a container registry. 
+func (client RegistriesClient) CreateOrUpdate(resourceGroupName string, registryName string, registry Registry) (result Registry, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: registry, + Constraints: []validation.Constraint{{Target: "registry.RegistryProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "registry.RegistryProperties.StorageAccount.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "registry.RegistryProperties.StorageAccount.AccessKey", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "registry.RegistryProperties.LoginServer", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "registry.RegistryProperties.CreationDate", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "containerregistry.RegistriesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, registryName, registry) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client RegistriesClient) CreateOrUpdatePreparer(resourceGroupName string, registryName string, registry Registry) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithJSON(registry), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RegistriesClient) CreateOrUpdateResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a container registry. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. 
+func (client RegistriesClient) Delete(resourceGroupName string, registryName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, registryName) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RegistriesClient) DeletePreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. 
The method always +// closes the http.Response Body. +func (client RegistriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetCredentials gets the administrator login credentials for the specified +// container registry. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. +func (client RegistriesClient) GetCredentials(resourceGroupName string, registryName string) (result RegistryCredentials, err error) { + req, err := client.GetCredentialsPreparer(resourceGroupName, registryName) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", nil, "Failure preparing request") + } + + resp, err := client.GetCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", resp, "Failure sending request") + } + + result, err = client.GetCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetCredentials", resp, "Failure responding to request") + } + + return +} + +// GetCredentialsPreparer prepares the GetCredentials request. 
+func (client RegistriesClient) GetCredentialsPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/getCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetCredentialsSender sends the GetCredentials request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) GetCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetCredentialsResponder handles the response to the GetCredentials request. The method always +// closes the http.Response Body. +func (client RegistriesClient) GetCredentialsResponder(resp *http.Response) (result RegistryCredentials, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetProperties gets the properties of the specified container registry. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. 
+func (client RegistriesClient) GetProperties(resourceGroupName string, registryName string) (result Registry, err error) { + req, err := client.GetPropertiesPreparer(resourceGroupName, registryName) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", nil, "Failure preparing request") + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", resp, "Failure sending request") + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "GetProperties", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. +func (client RegistriesClient) GetPropertiesPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client RegistriesClient) GetPropertiesResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the available container registries under the specified +// subscription. +func (client RegistriesClient) List() (result RegistryListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client RegistriesClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerRegistry/registries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListResponder(resp *http.Response) (result RegistryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client RegistriesClient) ListNextResults(lastResults RegistryListResult) (result RegistryListResult, err error) { + req, err := lastResults.RegistryListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup lists all the available container registries under the +// specified resource group. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. 
+func (client RegistriesClient) ListByResourceGroup(resourceGroupName string) (result RegistryListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client RegistriesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client RegistriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client RegistriesClient) ListByResourceGroupResponder(resp *http.Response) (result RegistryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client RegistriesClient) ListByResourceGroupNextResults(lastResults RegistryListResult) (result RegistryListResult, err error) { + req, err := lastResults.RegistryListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// RegenerateCredentials regenerates the administrator login credentials for +// the specified container registry. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. 
+func (client RegistriesClient) RegenerateCredentials(resourceGroupName string, registryName string) (result RegistryCredentials, err error) { + req, err := client.RegenerateCredentialsPreparer(resourceGroupName, registryName) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", nil, "Failure preparing request") + } + + resp, err := client.RegenerateCredentialsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", resp, "Failure sending request") + } + + result, err = client.RegenerateCredentialsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "RegenerateCredentials", resp, "Failure responding to request") + } + + return +} + +// RegenerateCredentialsPreparer prepares the RegenerateCredentials request. +func (client RegistriesClient) RegenerateCredentialsPreparer(resourceGroupName string, registryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/regenerateCredentials", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateCredentialsSender sends the RegenerateCredentials request. 
The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) RegenerateCredentialsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateCredentialsResponder handles the response to the RegenerateCredentials request. The method always +// closes the http.Response Body. +func (client RegistriesClient) RegenerateCredentialsResponder(resp *http.Response) (result RegistryCredentials, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a container registry with the specified parameters. +// +// resourceGroupName is the name of the resource group to which the container +// registry belongs. registryName is the name of the container registry. +// registryUpdateParameters is the parameters for updating a container +// registry. +func (client RegistriesClient) Update(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (result Registry, err error) { + req, err := client.UpdatePreparer(resourceGroupName, registryName, registryUpdateParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "containerregistry.RegistriesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registryName string, registryUpdateParameters RegistryUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "registryName": autorest.Encode("path", registryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}", pathParameters), + autorest.WithJSON(registryUpdateParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RegistriesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client RegistriesClient) UpdateResponder(resp *http.Response) (result Registry, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go new file mode 100644 index 000000000000..e0d70c1b510c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go @@ -0,0 +1,43 @@ +package containerregistry + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "containerregistry", "2016-06-27-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go new file mode 100644 index 000000000000..872fbbf7c5b3 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -0,0 +1,629 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ApplicationGatewaysClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resources. 
The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type ApplicationGatewaysClient struct { + ManagementClient +} + +// NewApplicationGatewaysClient creates an instance of the +// ApplicationGatewaysClient client. +func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient { + return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationGatewaysClientWithBaseURI creates an instance of the +// ApplicationGatewaysClient client. +func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient { + return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// BackendHealth gets the backend health of the specified application gateway +// in a resource group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. expand is expands +// BackendAddressPool and BackendHttpSettings referenced in backend health. 
+func (client ApplicationGatewaysClient) BackendHealth(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.BackendHealthPreparer(resourceGroupName, applicationGatewayName, expand, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", nil, "Failure preparing request") + } + + resp, err := client.BackendHealthSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure sending request") + } + + result, err = client.BackendHealthResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "BackendHealth", resp, "Failure responding to request") + } + + return +} + +// BackendHealthPreparer prepares the BackendHealth request. +func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName string, applicationGatewayName string, expand string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/backendhealth", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: 
cancel}) +} + +// BackendHealthSender sends the BackendHealth request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// BackendHealthResponder handles the response to the BackendHealth request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) BackendHealthResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate creates or updates the specified application gateway. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. parameters is parameters supplied +// to the create or update application gateway operation. 
+func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ApplicationGatewayPropertiesFormat.WebApplicationFirewallConfiguration.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ApplicationGatewayPropertiesFormat.OperationalState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified application gateway. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. +func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified application gateway. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. 
+func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) { + req, err := client.GetPreparer(resourceGroupName, applicationGatewayName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (result ApplicationGateway, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all application gateways in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { + req, err := lastResults.ApplicationGatewayListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all the application gateways in a subscription. +func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListAllResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { + req, err := lastResults.ApplicationGatewayListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// Start starts the specified application gateway. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. 
+func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StartPreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", resp, "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. 
+func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop stops the specified application gateway in a resource group. This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. applicationGatewayName +// is the name of the application gateway. +func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.StopPreparer(resourceGroupName, applicationGatewayName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request") + } + + resp, err := client.StopSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure sending request") + } + + result, err = client.StopResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", resp, "Failure responding to request") + } + + return +} + +// StopPreparer prepares the Stop request. 
+func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. 
+func (client ApplicationGatewaysClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go new file mode 100644 index 000000000000..74e3d7e491b7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -0,0 +1,130 @@ +// Package network implements the Azure ARM Network service API version +// 2016-09-01. +// +// The Microsoft Azure Network management API provides a RESTful set of web +// services that interact with Microsoft Azure Networks service to manage +// your network resources. The API has entities that capture the relationship +// between an end user and the Microsoft Azure Networks service. +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +const ( + // APIVersion is the version of the Network + APIVersion = "2016-09-01" + + // DefaultBaseURI is the default URI used for the service Network + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Network. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} + +// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net +// zone is available for use. +// +// location is the location of the domain name. domainNameLabel is the domain +// name to be verified. It must conform to the following regular expression: +// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. 
+func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) { + req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckDNSNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckDNSNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ManagementClient", "CheckDNSNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request. +func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, domainNameLabel string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(domainNameLabel) > 0 { + queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always +// closes the http.Response Body. +func (client ManagementClient) CheckDNSNameAvailabilityResponder(resp *http.Response) (result DNSNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go new file mode 100644 index 000000000000..eb0a2a075c2c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go @@ -0,0 +1,340 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ExpressRouteCircuitAuthorizationsClient is the the Microsoft Azure Network +// management API provides a RESTful set of web services that interact with +// Microsoft Azure Networks service to manage your network resources. The API +// has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type ExpressRouteCircuitAuthorizationsClient struct { + ManagementClient +} + +// NewExpressRouteCircuitAuthorizationsClient creates an instance of the +// ExpressRouteCircuitAuthorizationsClient client. +func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient { + return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance +// of the ExpressRouteCircuitAuthorizationsClient client. +func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient { + return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an authorization in the specified express +// route circuit. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. authorizationName is the name of the +// authorization. authorizationParameters is parameters supplied to the +// create or update express route circuit authorization operation. 
+func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, authorizationName, authorizationParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithJSON(authorizationParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified authorization from the specified express route +// circuit. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. authorizationName is the name of the +// authorization. +func (client ExpressRouteCircuitAuthorizationsClient) Delete(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, authorizationName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGroupName string, circuitName string, authorizationName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified authorization from the specified express route +// circuit. +// +// resourceGroupName is the name of the resource group. 
circuitName is the +// name of the express route circuit. authorizationName is the name of the +// authorization. +func (client ExpressRouteCircuitAuthorizationsClient) Get(resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) { + req, err := client.GetPreparer(resourceGroupName, circuitName, authorizationName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all authorizations in an express route circuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. 
+func (client ExpressRouteCircuitAuthorizationsClient) List(resourceGroupName string, circuitName string) (result AuthorizationListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) ListResponder(resp *http.Response) (result AuthorizationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitAuthorizationsClient) ListNextResults(lastResults AuthorizationListResult) (result AuthorizationListResult, err error) { + req, err := lastResults.AuthorizationListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go new file mode 100644 index 000000000000..a459574b89b7 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go @@ -0,0 +1,338 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ExpressRouteCircuitPeeringsClient is the the Microsoft Azure Network +// management API provides a RESTful set of web services that interact with +// Microsoft Azure Networks service to manage your network resources. The API +// has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type ExpressRouteCircuitPeeringsClient struct { + ManagementClient +} + +// NewExpressRouteCircuitPeeringsClient creates an instance of the +// ExpressRouteCircuitPeeringsClient client. +func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient { + return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitPeeringsClient client. 
+func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient { + return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a peering in the specified express route +// circuits. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +// peeringParameters is parameters supplied to the create or update express +// route circuit peering operation. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, peeringName, peeringParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), + autorest.WithJSON(peeringParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified peering from the specified express route +// circuit. 
This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +func (client ExpressRouteCircuitPeeringsClient) Delete(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, peeringName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName string, circuitName string, peeringName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified authorization from the specified express route +// circuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. 
peeringName is the name of the peering. +func (client ExpressRouteCircuitPeeringsClient) Get(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) { + req, err := client.GetPreparer(resourceGroupName, circuitName, peeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all peerings in a specified express route circuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. +func (client ExpressRouteCircuitPeeringsClient) List(resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitPeeringListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitPeeringsClient) ListNextResults(lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, err error) { + req, err := lastResults.ExpressRouteCircuitPeeringListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go new file mode 100644 index 000000000000..6572e92b625f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -0,0 +1,755 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ExpressRouteCircuitsClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type ExpressRouteCircuitsClient struct { + ManagementClient +} + +// NewExpressRouteCircuitsClient creates an instance of the +// ExpressRouteCircuitsClient client. +func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient { + return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitsClient client. +func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient { + return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an express route circuit. This method may +// poll for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the circuit. parameters is parameters supplied to the create or +// update express route circuit operation. 
+func (client ExpressRouteCircuitsClient) CreateOrUpdate(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, circuitName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupName string, circuitName string, parameters ExpressRouteCircuit, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate 
request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified express route circuit. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. +func (client ExpressRouteCircuitsClient) Delete(resourceGroupName string, circuitName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, circuitName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string, circuitName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified express route circuit. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of express route circuit. 
+func (client ExpressRouteCircuitsClient) Get(resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) { + req, err := client.GetPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. 
The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetPeeringStats gets all stats from an express route circuit in a resource +// group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +func (client ExpressRouteCircuitsClient) GetPeeringStats(resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitStats, err error) { + req, err := client.GetPeeringStatsPreparer(resourceGroupName, circuitName, peeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", nil, "Failure preparing request") + } + + resp, err := client.GetPeeringStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure sending request") + } + + result, err = client.GetPeeringStatsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetPeeringStats", resp, "Failure responding to request") + } + + return +} + +// GetPeeringStatsPreparer prepares the GetPeeringStats request. 
+func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPeeringStatsSender sends the GetPeeringStats request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetStats gets all the stats from an express route circuit in a resource +// group. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. 
+func (client ExpressRouteCircuitsClient) GetStats(resourceGroupName string, circuitName string) (result ExpressRouteCircuitStats, err error) { + req, err := client.GetStatsPreparer(resourceGroupName, circuitName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", nil, "Failure preparing request") + } + + resp, err := client.GetStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure sending request") + } + + result, err = client.GetStatsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "GetStats", resp, "Failure responding to request") + } + + return +} + +// GetStatsPreparer prepares the GetStats request. +func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetStatsSender sends the GetStats request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetStatsResponder handles the response to the GetStats request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) (result ExpressRouteCircuitStats, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the express route circuits in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client ExpressRouteCircuitsClient) List(resourceGroupName string) (result ExpressRouteCircuitListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { + req, err := lastResults.ExpressRouteCircuitListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all the express route circuits in a subscription. +func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListAllResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { + req, err := lastResults.ExpressRouteCircuitListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// ListArpTable gets the currently advertised ARP table associated with the +// express route circuit in a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +// devicePath is the path of the device. 
+func (client ExpressRouteCircuitsClient) ListArpTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListArpTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request") + } + + resp, err := client.ListArpTableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request") + } + + result, err = client.ListArpTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request") + } + + return +} + +// ListArpTablePreparer prepares the ListArpTable request. +func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + 
return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListArpTableSender sends the ListArpTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListArpTableResponder handles the response to the ListArpTable request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListRoutesTable gets the currently advertised routes table associated with +// the express route circuit in a resource group. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +// devicePath is the path of the device. 
+func (client ExpressRouteCircuitsClient) ListRoutesTable(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListRoutesTablePreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request") + } + + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request") + } + + result, err = client.ListRoutesTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request") + } + + return +} + +// ListRoutesTablePreparer prepares the ListRoutesTable request. 
+func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListRoutesTableSender sends the ListRoutesTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListRoutesTableSummary gets the currently advertised routes table summary +// associated with the express route circuit in a resource group. This method +// may poll for completion. Polling can be canceled by passing the cancel +// channel argument. The channel will be used to cancel polling and any +// outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. circuitName is the +// name of the express route circuit. peeringName is the name of the peering. +// devicePath is the path of the device. +func (client ExpressRouteCircuitsClient) ListRoutesTableSummary(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListRoutesTableSummaryPreparer(resourceGroupName, circuitName, peeringName, devicePath, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", nil, "Failure preparing request") + } + + resp, err := client.ListRoutesTableSummarySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure sending request") + } + + result, err = client.ListRoutesTableSummaryResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTableSummary", resp, "Failure responding to request") + } + + return +} + +// ListRoutesTableSummaryPreparer prepares the ListRoutesTableSummary request. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resourceGroupName string, circuitName string, peeringName string, devicePath string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "devicePath": autorest.Encode("path", devicePath), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListRoutesTableSummarySender sends the ListRoutesTableSummary request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListRoutesTableSummaryResponder handles the response to the ListRoutesTableSummary request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go new file mode 100644 index 000000000000..9d0450ecf093 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -0,0 +1,128 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ExpressRouteServiceProvidersClient is the the Microsoft Azure Network +// management API provides a RESTful set of web services that interact with +// Microsoft Azure Networks service to manage your network resources. 
The API +// has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type ExpressRouteServiceProvidersClient struct { + ManagementClient +} + +// NewExpressRouteServiceProvidersClient creates an instance of the +// ExpressRouteServiceProvidersClient client. +func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient { + return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the +// ExpressRouteServiceProvidersClient client. +func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient { + return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets all the available express route service providers. +func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteServiceProvidersClient) ListResponder(resp *http.Response) (result ExpressRouteServiceProviderListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteServiceProvidersClient) ListNextResults(lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, err error) { + req, err := lastResults.ExpressRouteServiceProviderListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go new file mode 100644 index 000000000000..9fbf7cddd4a5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -0,0 +1,813 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// InterfacesClient is the the Microsoft Azure Network management API provides +// a RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type InterfacesClient struct { + ManagementClient +} + +// NewInterfacesClient creates an instance of the InterfacesClient client. +func NewInterfacesClient(subscriptionID string) InterfacesClient { + return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient +// client. +func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { + return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a network interface. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. parameters is parameters supplied to +// the create or update network interface operation. 
+func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.InterfacePropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.InterfacesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified network interface. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
+// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. +func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified network interface. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. expand is expands referenced +// resources. +func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) { + req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetEffectiveRouteTable gets all route tables applied to a network +// interface. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
networkInterfaceName +// is the name of the network interface. +func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request") + } + + resp, err := client.GetEffectiveRouteTableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request") + } + + result, err = client.GetEffectiveRouteTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request") + } + + return +} + +// GetEffectiveRouteTablePreparer prepares the GetEffectiveRouteTable request. 
+func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetEffectiveRouteTableSender sends the GetEffectiveRouteTable request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetEffectiveRouteTableResponder handles the response to the GetEffectiveRouteTable request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetVirtualMachineScaleSetNetworkInterface get the specified network +// interface in a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. 
+// virtualMachineScaleSetName is the name of the virtual machine scale set. +// virtualmachineIndex is the virtual machine index. networkInterfaceName is +// the name of the network interface. expand is expands referenced resources. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) { + req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request") + } + + resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request") + } + + result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request") + } + + return +} + +// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request. 
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all network interfaces in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all network interfaces in a subscription. +func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} + +// ListEffectiveNetworkSecurityGroups gets all network security groups applied +// to a network interface. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. 
+func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request") + } + + resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request") + } + + result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request") + } + + return +} + +// ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request. 
+func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListEffectiveNetworkSecurityGroupsSender sends the ListEffectiveNetworkSecurityGroups request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in +// a virtual machine scale set. 
+// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) { + req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request") + } + + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request") + } + + result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request. 
+func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending next results request") + } + + result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to next results request") + } + + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all +// network interfaces in a virtual machine in a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. +// virtualMachineScaleSetName is the name of the virtual machine scale set. +// virtualmachineIndex is the virtual machine index. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) { + req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request") + } + + resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request") + } + + result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.InterfaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending next results request") + } + + result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go new file mode 100644 index 000000000000..30012ff35b90 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -0,0 +1,416 @@ 
+package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// LoadBalancersClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type LoadBalancersClient struct { + ManagementClient +} + +// NewLoadBalancersClient creates an instance of the LoadBalancersClient +// client. +func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient { + return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancersClientWithBaseURI creates an instance of the +// LoadBalancersClient client. +func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient { + return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a load balancer. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. loadBalancerName is +// the name of the load balancer. parameters is parameters supplied to the +// create or update load balancer operation. +func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified load balancer. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
+// +// resourceGroupName is the name of the resource group. loadBalancerName is +// the name of the load balancer. +func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, loadBalancerName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified load balancer. +// +// resourceGroupName is the name of the resource group. loadBalancerName is +// the name of the load balancer. expand is expands referenced resources. +func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) { + req, err := client.GetPreparer(resourceGroupName, loadBalancerName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) GetResponder(resp *http.Response) (result LoadBalancer, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancers in a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client LoadBalancersClient) ListResponder(resp *http.Response) (result LoadBalancerListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { + req, err := lastResults.LoadBalancerListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all the load balancers in a subscription. 
+func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client LoadBalancersClient) ListAllResponder(resp *http.Response) (result LoadBalancerListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { + req, err := lastResults.LoadBalancerListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go new file mode 100644 index 000000000000..fd48306f245d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -0,0 +1,342 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// LocalNetworkGatewaysClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type LocalNetworkGatewaysClient struct { + ManagementClient +} + +// NewLocalNetworkGatewaysClient creates an instance of the +// LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient { + return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the +// LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient { + return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a local network gateway in the specified +// resource group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. 
The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// localNetworkGatewayName is the name of the local network gateway. +// parameters is parameters supplied to the create or update local network +// gateway operation. +func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.LocalNetworkGatewayPropertiesFormat.LocalNetworkAddressSpace", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LocalNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified local network gateway. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. +// localNetworkGatewayName is the name of the local network gateway. +func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified local network gateway in a resource group. +// +// resourceGroupName is the name of the resource group. +// localNetworkGatewayName is the name of the local network gateway. 
+func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) { + req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (result LocalNetworkGateway, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the local network gateways in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) ListResponder(resp *http.Response) (result LocalNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, err error) { + req, err := lastResults.LocalNetworkGatewayListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go new file mode 100644 index 000000000000..bb514f796b31 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go @@ -0,0 +1,2158 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// ApplicationGatewayBackendHealthServerHealth enumerates the values for +// application gateway backend health server health. +type ApplicationGatewayBackendHealthServerHealth string + +const ( + // Down specifies the down state for application gateway backend health + // server health. + Down ApplicationGatewayBackendHealthServerHealth = "Down" + // Partial specifies the partial state for application gateway backend + // health server health. + Partial ApplicationGatewayBackendHealthServerHealth = "Partial" + // Unknown specifies the unknown state for application gateway backend + // health server health. + Unknown ApplicationGatewayBackendHealthServerHealth = "Unknown" + // Up specifies the up state for application gateway backend health server + // health. + Up ApplicationGatewayBackendHealthServerHealth = "Up" +) + +// ApplicationGatewayCookieBasedAffinity enumerates the values for application +// gateway cookie based affinity. +type ApplicationGatewayCookieBasedAffinity string + +const ( + // Disabled specifies the disabled state for application gateway cookie + // based affinity. + Disabled ApplicationGatewayCookieBasedAffinity = "Disabled" + // Enabled specifies the enabled state for application gateway cookie + // based affinity. + Enabled ApplicationGatewayCookieBasedAffinity = "Enabled" +) + +// ApplicationGatewayFirewallMode enumerates the values for application +// gateway firewall mode. +type ApplicationGatewayFirewallMode string + +const ( + // Detection specifies the detection state for application gateway + // firewall mode. + Detection ApplicationGatewayFirewallMode = "Detection" + // Prevention specifies the prevention state for application gateway + // firewall mode. 
+ Prevention ApplicationGatewayFirewallMode = "Prevention" +) + +// ApplicationGatewayOperationalState enumerates the values for application +// gateway operational state. +type ApplicationGatewayOperationalState string + +const ( + // Running specifies the running state for application gateway operational + // state. + Running ApplicationGatewayOperationalState = "Running" + // Starting specifies the starting state for application gateway + // operational state. + Starting ApplicationGatewayOperationalState = "Starting" + // Stopped specifies the stopped state for application gateway operational + // state. + Stopped ApplicationGatewayOperationalState = "Stopped" + // Stopping specifies the stopping state for application gateway + // operational state. + Stopping ApplicationGatewayOperationalState = "Stopping" +) + +// ApplicationGatewayProtocol enumerates the values for application gateway +// protocol. +type ApplicationGatewayProtocol string + +const ( + // HTTP specifies the http state for application gateway protocol. + HTTP ApplicationGatewayProtocol = "Http" + // HTTPS specifies the https state for application gateway protocol. + HTTPS ApplicationGatewayProtocol = "Https" +) + +// ApplicationGatewayRequestRoutingRuleType enumerates the values for +// application gateway request routing rule type. +type ApplicationGatewayRequestRoutingRuleType string + +const ( + // Basic specifies the basic state for application gateway request routing + // rule type. + Basic ApplicationGatewayRequestRoutingRuleType = "Basic" + // PathBasedRouting specifies the path based routing state for application + // gateway request routing rule type. + PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting" +) + +// ApplicationGatewaySkuName enumerates the values for application gateway sku +// name. +type ApplicationGatewaySkuName string + +const ( + // StandardLarge specifies the standard large state for application + // gateway sku name. 
+ StandardLarge ApplicationGatewaySkuName = "Standard_Large" + // StandardMedium specifies the standard medium state for application + // gateway sku name. + StandardMedium ApplicationGatewaySkuName = "Standard_Medium" + // StandardSmall specifies the standard small state for application + // gateway sku name. + StandardSmall ApplicationGatewaySkuName = "Standard_Small" + // WAFLarge specifies the waf large state for application gateway sku name. + WAFLarge ApplicationGatewaySkuName = "WAF_Large" + // WAFMedium specifies the waf medium state for application gateway sku + // name. + WAFMedium ApplicationGatewaySkuName = "WAF_Medium" +) + +// ApplicationGatewaySslProtocol enumerates the values for application gateway +// ssl protocol. +type ApplicationGatewaySslProtocol string + +const ( + // TLSv10 specifies the tl sv 10 state for application gateway ssl + // protocol. + TLSv10 ApplicationGatewaySslProtocol = "TLSv1_0" + // TLSv11 specifies the tl sv 11 state for application gateway ssl + // protocol. + TLSv11 ApplicationGatewaySslProtocol = "TLSv1_1" + // TLSv12 specifies the tl sv 12 state for application gateway ssl + // protocol. + TLSv12 ApplicationGatewaySslProtocol = "TLSv1_2" +) + +// ApplicationGatewayTier enumerates the values for application gateway tier. +type ApplicationGatewayTier string + +const ( + // Standard specifies the standard state for application gateway tier. + Standard ApplicationGatewayTier = "Standard" + // WAF specifies the waf state for application gateway tier. + WAF ApplicationGatewayTier = "WAF" +) + +// AuthorizationUseStatus enumerates the values for authorization use status. +type AuthorizationUseStatus string + +const ( + // Available specifies the available state for authorization use status. + Available AuthorizationUseStatus = "Available" + // InUse specifies the in use state for authorization use status. + InUse AuthorizationUseStatus = "InUse" +) + +// EffectiveRouteSource enumerates the values for effective route source. 
+type EffectiveRouteSource string + +const ( + // EffectiveRouteSourceDefault specifies the effective route source + // default state for effective route source. + EffectiveRouteSourceDefault EffectiveRouteSource = "Default" + // EffectiveRouteSourceUnknown specifies the effective route source + // unknown state for effective route source. + EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown" + // EffectiveRouteSourceUser specifies the effective route source user + // state for effective route source. + EffectiveRouteSourceUser EffectiveRouteSource = "User" + // EffectiveRouteSourceVirtualNetworkGateway specifies the effective route + // source virtual network gateway state for effective route source. + EffectiveRouteSourceVirtualNetworkGateway EffectiveRouteSource = "VirtualNetworkGateway" +) + +// EffectiveRouteState enumerates the values for effective route state. +type EffectiveRouteState string + +const ( + // Active specifies the active state for effective route state. + Active EffectiveRouteState = "Active" + // Invalid specifies the invalid state for effective route state. + Invalid EffectiveRouteState = "Invalid" +) + +// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values +// for express route circuit peering advertised public prefix state. +type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string + +const ( + // Configured specifies the configured state for express route circuit + // peering advertised public prefix state. + Configured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configured" + // Configuring specifies the configuring state for express route circuit + // peering advertised public prefix state. + Configuring ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configuring" + // NotConfigured specifies the not configured state for express route + // circuit peering advertised public prefix state. 
+ NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured" + // ValidationNeeded specifies the validation needed state for express + // route circuit peering advertised public prefix state. + ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded" +) + +// ExpressRouteCircuitPeeringState enumerates the values for express route +// circuit peering state. +type ExpressRouteCircuitPeeringState string + +const ( + // ExpressRouteCircuitPeeringStateDisabled specifies the express route + // circuit peering state disabled state for express route circuit peering + // state. + ExpressRouteCircuitPeeringStateDisabled ExpressRouteCircuitPeeringState = "Disabled" + // ExpressRouteCircuitPeeringStateEnabled specifies the express route + // circuit peering state enabled state for express route circuit peering + // state. + ExpressRouteCircuitPeeringStateEnabled ExpressRouteCircuitPeeringState = "Enabled" +) + +// ExpressRouteCircuitPeeringType enumerates the values for express route +// circuit peering type. +type ExpressRouteCircuitPeeringType string + +const ( + // AzurePrivatePeering specifies the azure private peering state for + // express route circuit peering type. + AzurePrivatePeering ExpressRouteCircuitPeeringType = "AzurePrivatePeering" + // AzurePublicPeering specifies the azure public peering state for express + // route circuit peering type. + AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering" + // MicrosoftPeering specifies the microsoft peering state for express + // route circuit peering type. + MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering" +) + +// ExpressRouteCircuitSkuFamily enumerates the values for express route +// circuit sku family. +type ExpressRouteCircuitSkuFamily string + +const ( + // MeteredData specifies the metered data state for express route circuit + // sku family. 
+ MeteredData ExpressRouteCircuitSkuFamily = "MeteredData" + // UnlimitedData specifies the unlimited data state for express route + // circuit sku family. + UnlimitedData ExpressRouteCircuitSkuFamily = "UnlimitedData" +) + +// ExpressRouteCircuitSkuTier enumerates the values for express route circuit +// sku tier. +type ExpressRouteCircuitSkuTier string + +const ( + // ExpressRouteCircuitSkuTierPremium specifies the express route circuit + // sku tier premium state for express route circuit sku tier. + ExpressRouteCircuitSkuTierPremium ExpressRouteCircuitSkuTier = "Premium" + // ExpressRouteCircuitSkuTierStandard specifies the express route circuit + // sku tier standard state for express route circuit sku tier. + ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard" +) + +// IPAllocationMethod enumerates the values for ip allocation method. +type IPAllocationMethod string + +const ( + // Dynamic specifies the dynamic state for ip allocation method. + Dynamic IPAllocationMethod = "Dynamic" + // Static specifies the static state for ip allocation method. + Static IPAllocationMethod = "Static" +) + +// IPVersion enumerates the values for ip version. +type IPVersion string + +const ( + // IPv4 specifies the i pv 4 state for ip version. + IPv4 IPVersion = "IPv4" + // IPv6 specifies the i pv 6 state for ip version. + IPv6 IPVersion = "IPv6" +) + +// LoadDistribution enumerates the values for load distribution. +type LoadDistribution string + +const ( + // Default specifies the default state for load distribution. + Default LoadDistribution = "Default" + // SourceIP specifies the source ip state for load distribution. + SourceIP LoadDistribution = "SourceIP" + // SourceIPProtocol specifies the source ip protocol state for load + // distribution. + SourceIPProtocol LoadDistribution = "SourceIPProtocol" +) + +// OperationStatus enumerates the values for operation status. 
+type OperationStatus string + +const ( + // Failed specifies the failed state for operation status. + Failed OperationStatus = "Failed" + // InProgress specifies the in progress state for operation status. + InProgress OperationStatus = "InProgress" + // Succeeded specifies the succeeded state for operation status. + Succeeded OperationStatus = "Succeeded" +) + +// ProbeProtocol enumerates the values for probe protocol. +type ProbeProtocol string + +const ( + // ProbeProtocolHTTP specifies the probe protocol http state for probe + // protocol. + ProbeProtocolHTTP ProbeProtocol = "Http" + // ProbeProtocolTCP specifies the probe protocol tcp state for probe + // protocol. + ProbeProtocolTCP ProbeProtocol = "Tcp" +) + +// ProcessorArchitecture enumerates the values for processor architecture. +type ProcessorArchitecture string + +const ( + // Amd64 specifies the amd 64 state for processor architecture. + Amd64 ProcessorArchitecture = "Amd64" + // X86 specifies the x86 state for processor architecture. + X86 ProcessorArchitecture = "X86" +) + +// RouteNextHopType enumerates the values for route next hop type. +type RouteNextHopType string + +const ( + // RouteNextHopTypeInternet specifies the route next hop type internet + // state for route next hop type. + RouteNextHopTypeInternet RouteNextHopType = "Internet" + // RouteNextHopTypeNone specifies the route next hop type none state for + // route next hop type. + RouteNextHopTypeNone RouteNextHopType = "None" + // RouteNextHopTypeVirtualAppliance specifies the route next hop type + // virtual appliance state for route next hop type. + RouteNextHopTypeVirtualAppliance RouteNextHopType = "VirtualAppliance" + // RouteNextHopTypeVirtualNetworkGateway specifies the route next hop type + // virtual network gateway state for route next hop type. 
+ RouteNextHopTypeVirtualNetworkGateway RouteNextHopType = "VirtualNetworkGateway" + // RouteNextHopTypeVnetLocal specifies the route next hop type vnet local + // state for route next hop type. + RouteNextHopTypeVnetLocal RouteNextHopType = "VnetLocal" +) + +// SecurityRuleAccess enumerates the values for security rule access. +type SecurityRuleAccess string + +const ( + // Allow specifies the allow state for security rule access. + Allow SecurityRuleAccess = "Allow" + // Deny specifies the deny state for security rule access. + Deny SecurityRuleAccess = "Deny" +) + +// SecurityRuleDirection enumerates the values for security rule direction. +type SecurityRuleDirection string + +const ( + // Inbound specifies the inbound state for security rule direction. + Inbound SecurityRuleDirection = "Inbound" + // Outbound specifies the outbound state for security rule direction. + Outbound SecurityRuleDirection = "Outbound" +) + +// SecurityRuleProtocol enumerates the values for security rule protocol. +type SecurityRuleProtocol string + +const ( + // Asterisk specifies the asterisk state for security rule protocol. + Asterisk SecurityRuleProtocol = "*" + // TCP specifies the tcp state for security rule protocol. + TCP SecurityRuleProtocol = "Tcp" + // UDP specifies the udp state for security rule protocol. + UDP SecurityRuleProtocol = "Udp" +) + +// ServiceProviderProvisioningState enumerates the values for service provider +// provisioning state. +type ServiceProviderProvisioningState string + +const ( + // Deprovisioning specifies the deprovisioning state for service provider + // provisioning state. + Deprovisioning ServiceProviderProvisioningState = "Deprovisioning" + // NotProvisioned specifies the not provisioned state for service provider + // provisioning state. + NotProvisioned ServiceProviderProvisioningState = "NotProvisioned" + // Provisioned specifies the provisioned state for service provider + // provisioning state. 
+ Provisioned ServiceProviderProvisioningState = "Provisioned" + // Provisioning specifies the provisioning state for service provider + // provisioning state. + Provisioning ServiceProviderProvisioningState = "Provisioning" +) + +// TransportProtocol enumerates the values for transport protocol. +type TransportProtocol string + +const ( + // TransportProtocolTCP specifies the transport protocol tcp state for + // transport protocol. + TransportProtocolTCP TransportProtocol = "Tcp" + // TransportProtocolUDP specifies the transport protocol udp state for + // transport protocol. + TransportProtocolUDP TransportProtocol = "Udp" +) + +// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual +// network gateway connection status. +type VirtualNetworkGatewayConnectionStatus string + +const ( + // VirtualNetworkGatewayConnectionStatusConnected specifies the virtual + // network gateway connection status connected state for virtual network + // gateway connection status. + VirtualNetworkGatewayConnectionStatusConnected VirtualNetworkGatewayConnectionStatus = "Connected" + // VirtualNetworkGatewayConnectionStatusConnecting specifies the virtual + // network gateway connection status connecting state for virtual network + // gateway connection status. + VirtualNetworkGatewayConnectionStatusConnecting VirtualNetworkGatewayConnectionStatus = "Connecting" + // VirtualNetworkGatewayConnectionStatusNotConnected specifies the virtual + // network gateway connection status not connected state for virtual + // network gateway connection status. + VirtualNetworkGatewayConnectionStatusNotConnected VirtualNetworkGatewayConnectionStatus = "NotConnected" + // VirtualNetworkGatewayConnectionStatusUnknown specifies the virtual + // network gateway connection status unknown state for virtual network + // gateway connection status. 
+ VirtualNetworkGatewayConnectionStatusUnknown VirtualNetworkGatewayConnectionStatus = "Unknown" +) + +// VirtualNetworkGatewayConnectionType enumerates the values for virtual +// network gateway connection type. +type VirtualNetworkGatewayConnectionType string + +const ( + // ExpressRoute specifies the express route state for virtual network + // gateway connection type. + ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute" + // IPsec specifies the i psec state for virtual network gateway connection + // type. + IPsec VirtualNetworkGatewayConnectionType = "IPsec" + // Vnet2Vnet specifies the vnet 2 vnet state for virtual network gateway + // connection type. + Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet" + // VPNClient specifies the vpn client state for virtual network gateway + // connection type. + VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" +) + +// VirtualNetworkGatewaySkuName enumerates the values for virtual network +// gateway sku name. +type VirtualNetworkGatewaySkuName string + +const ( + // VirtualNetworkGatewaySkuNameBasic specifies the virtual network gateway + // sku name basic state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" + // VirtualNetworkGatewaySkuNameHighPerformance specifies the virtual + // network gateway sku name high performance state for virtual network + // gateway sku name. + VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance" + // VirtualNetworkGatewaySkuNameStandard specifies the virtual network + // gateway sku name standard state for virtual network gateway sku name. + VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard" + // VirtualNetworkGatewaySkuNameUltraPerformance specifies the virtual + // network gateway sku name ultra performance state for virtual network + // gateway sku name. 
+ VirtualNetworkGatewaySkuNameUltraPerformance VirtualNetworkGatewaySkuName = "UltraPerformance" +) + +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network +// gateway sku tier. +type VirtualNetworkGatewaySkuTier string + +const ( + // VirtualNetworkGatewaySkuTierBasic specifies the virtual network gateway + // sku tier basic state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" + // VirtualNetworkGatewaySkuTierHighPerformance specifies the virtual + // network gateway sku tier high performance state for virtual network + // gateway sku tier. + VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance" + // VirtualNetworkGatewaySkuTierStandard specifies the virtual network + // gateway sku tier standard state for virtual network gateway sku tier. + VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard" + // VirtualNetworkGatewaySkuTierUltraPerformance specifies the virtual + // network gateway sku tier ultra performance state for virtual network + // gateway sku tier. + VirtualNetworkGatewaySkuTierUltraPerformance VirtualNetworkGatewaySkuTier = "UltraPerformance" +) + +// VirtualNetworkGatewayType enumerates the values for virtual network gateway +// type. +type VirtualNetworkGatewayType string + +const ( + // VirtualNetworkGatewayTypeExpressRoute specifies the virtual network + // gateway type express route state for virtual network gateway type. + VirtualNetworkGatewayTypeExpressRoute VirtualNetworkGatewayType = "ExpressRoute" + // VirtualNetworkGatewayTypeVpn specifies the virtual network gateway type + // vpn state for virtual network gateway type. + VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" +) + +// VirtualNetworkPeeringState enumerates the values for virtual network +// peering state. 
+type VirtualNetworkPeeringState string + +const ( + // Connected specifies the connected state for virtual network peering + // state. + Connected VirtualNetworkPeeringState = "Connected" + // Disconnected specifies the disconnected state for virtual network + // peering state. + Disconnected VirtualNetworkPeeringState = "Disconnected" + // Initiated specifies the initiated state for virtual network peering + // state. + Initiated VirtualNetworkPeeringState = "Initiated" +) + +// VpnType enumerates the values for vpn type. +type VpnType string + +const ( + // PolicyBased specifies the policy based state for vpn type. + PolicyBased VpnType = "PolicyBased" + // RouteBased specifies the route based state for vpn type. + RouteBased VpnType = "RouteBased" +) + +// AddressSpace is addressSpace contains an array of IP address ranges that +// can be used by subnets of the virtual network. +type AddressSpace struct { + AddressPrefixes *[]string `json:"addressPrefixes,omitempty"` +} + +// ApplicationGateway is application gateway resource +type ApplicationGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ApplicationGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayAuthenticationCertificate is authentication certificates +// of an application gateway. +type ApplicationGatewayAuthenticationCertificate struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayAuthenticationCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayAuthenticationCertificatePropertiesFormat is +// authentication certificates properties of an application gateway. 
+type ApplicationGatewayAuthenticationCertificatePropertiesFormat struct { + Data *string `json:"data,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayBackendAddress is backend address of an application +// gateway. +type ApplicationGatewayBackendAddress struct { + Fqdn *string `json:"fqdn,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` +} + +// ApplicationGatewayBackendAddressPool is backend Address Pool of an +// application gateway. +type ApplicationGatewayBackendAddressPool struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of +// Backend Address Pool of an application gateway. +type ApplicationGatewayBackendAddressPoolPropertiesFormat struct { + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + BackendAddresses *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayBackendHealth is list of +// ApplicationGatewayBackendHealthPool resources. +type ApplicationGatewayBackendHealth struct { + autorest.Response `json:"-"` + BackendAddressPools *[]ApplicationGatewayBackendHealthPool `json:"backendAddressPools,omitempty"` +} + +// ApplicationGatewayBackendHealthHTTPSettings is application gateway +// BackendHealthHttp settings. +type ApplicationGatewayBackendHealthHTTPSettings struct { + BackendHTTPSettings *ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettings,omitempty"` + Servers *[]ApplicationGatewayBackendHealthServer `json:"servers,omitempty"` +} + +// ApplicationGatewayBackendHealthPool is application gateway BackendHealth +// pool. 
+type ApplicationGatewayBackendHealthPool struct { + BackendAddressPool *ApplicationGatewayBackendAddressPool `json:"backendAddressPool,omitempty"` + BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHealthHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` +} + +// ApplicationGatewayBackendHealthServer is application gateway backendhealth +// http settings. +type ApplicationGatewayBackendHealthServer struct { + Address *string `json:"address,omitempty"` + IPConfiguration *SubResource `json:"ipConfiguration,omitempty"` + Health ApplicationGatewayBackendHealthServerHealth `json:"health,omitempty"` +} + +// ApplicationGatewayBackendHTTPSettings is backend address pool settings of +// an application gateway. +type ApplicationGatewayBackendHTTPSettings struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of +// Backend address pool settings of an application gateway. +type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { + Port *int32 `json:"port,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + RequestTimeout *int32 `json:"requestTimeout,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + AuthenticationCertificates *[]SubResource `json:"authenticationCertificates,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of +// an application gateway. 
+type ApplicationGatewayFrontendIPConfiguration struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of +// Frontend IP configuration of an application gateway. +type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *SubResource `json:"subnet,omitempty"` + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendPort is frontend port of an application gateway. +type ApplicationGatewayFrontendPort struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend +// port of an application gateway. +type ApplicationGatewayFrontendPortPropertiesFormat struct { + Port *int32 `json:"port,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayHTTPListener is http listener of an application gateway. +type ApplicationGatewayHTTPListener struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayHTTPListenerPropertiesFormat is properties of HTTP +// listener of an application gateway. 
+type ApplicationGatewayHTTPListenerPropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + FrontendPort *SubResource `json:"frontendPort,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + HostName *string `json:"hostName,omitempty"` + SslCertificate *SubResource `json:"sslCertificate,omitempty"` + RequireServerNameIndication *bool `json:"requireServerNameIndication,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayIPConfiguration is iP configuration of an application +// gateway. Currently 1 public and 1 private IP configuration is allowed. +type ApplicationGatewayIPConfiguration struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP +// configuration of an application gateway. +type ApplicationGatewayIPConfigurationPropertiesFormat struct { + Subnet *SubResource `json:"subnet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayListResult is response for ListApplicationGateways API +// service call. +type ApplicationGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]ApplicationGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ApplicationGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ApplicationGatewayListResult) ApplicationGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ApplicationGatewayPathRule is path rule of URL path map of an application +// gateway. +type ApplicationGatewayPathRule struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayPathRulePropertiesFormat is properties of probe of an +// application gateway. +type ApplicationGatewayPathRulePropertiesFormat struct { + Paths *[]string `json:"paths,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayProbe is probe of the application gateway. +type ApplicationGatewayProbe struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayProbePropertiesFormat is properties of probe of an +// application gateway. +type ApplicationGatewayProbePropertiesFormat struct { + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + Host *string `json:"host,omitempty"` + Path *string `json:"path,omitempty"` + Interval *int32 `json:"interval,omitempty"` + Timeout *int32 `json:"timeout,omitempty"` + UnhealthyThreshold *int32 `json:"unhealthyThreshold,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayPropertiesFormat is properties of the application gateway. 
+type ApplicationGatewayPropertiesFormat struct { + Sku *ApplicationGatewaySku `json:"sku,omitempty"` + SslPolicy *ApplicationGatewaySslPolicy `json:"sslPolicy,omitempty"` + OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"` + GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIPConfigurations,omitempty"` + AuthenticationCertificates *[]ApplicationGatewayAuthenticationCertificate `json:"authenticationCertificates,omitempty"` + SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` + FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` + Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` + BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` + BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` + HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` + URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` + RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` + WebApplicationFirewallConfiguration *ApplicationGatewayWebApplicationFirewallConfiguration `json:"webApplicationFirewallConfiguration,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayRequestRoutingRule is request routing rule of an +// application gateway. 
+type ApplicationGatewayRequestRoutingRule struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of +// request routing rule of the application gateway. +type ApplicationGatewayRequestRoutingRulePropertiesFormat struct { + RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + HTTPListener *SubResource `json:"httpListener,omitempty"` + URLPathMap *SubResource `json:"urlPathMap,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewaySku is sKU of an application gateway +type ApplicationGatewaySku struct { + Name ApplicationGatewaySkuName `json:"name,omitempty"` + Tier ApplicationGatewayTier `json:"tier,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` +} + +// ApplicationGatewaySslCertificate is sSL certificates of an application +// gateway. +type ApplicationGatewaySslCertificate struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL +// certificates of an application gateway. +type ApplicationGatewaySslCertificatePropertiesFormat struct { + Data *string `json:"data,omitempty"` + Password *string `json:"password,omitempty"` + PublicCertData *string `json:"publicCertData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewaySslPolicy is application gateway SSL policy. 
+type ApplicationGatewaySslPolicy struct { + DisabledSslProtocols *[]ApplicationGatewaySslProtocol `json:"disabledSslProtocols,omitempty"` +} + +// ApplicationGatewayURLPathMap is urlPathMaps give a url path to the backend +// mapping information for PathBasedRouting. +type ApplicationGatewayURLPathMap struct { + ID *string `json:"id,omitempty"` + *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayURLPathMapPropertiesFormat is properties of UrlPathMap of +// the application gateway. +type ApplicationGatewayURLPathMapPropertiesFormat struct { + DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` + DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` + PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayWebApplicationFirewallConfiguration is application +// gateway web application firewall configuration. +type ApplicationGatewayWebApplicationFirewallConfiguration struct { + Enabled *bool `json:"enabled,omitempty"` + FirewallMode ApplicationGatewayFirewallMode `json:"firewallMode,omitempty"` +} + +// AuthorizationListResult is response for ListAuthorizations API service call +// retrieves all authorizations that belongs to an ExpressRouteCircuit. +type AuthorizationListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitAuthorization `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// AuthorizationListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client AuthorizationListResult) AuthorizationListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// AuthorizationPropertiesFormat is +type AuthorizationPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` + AuthorizationUseStatus AuthorizationUseStatus `json:"authorizationUseStatus,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AzureAsyncOperationResult is the response body contains the status of the +// specified asynchronous operation, indicating whether it has succeeded, is +// in progress, or has failed. Note that this status is distinct from the +// HTTP status code returned for the Get Operation Status operation itself. +// If the asynchronous operation succeeded, the response body includes the +// HTTP status code for the successful request. If the asynchronous operation +// failed, the response body includes the HTTP status code for the failed +// request and error information regarding the failure. +type AzureAsyncOperationResult struct { + Status OperationStatus `json:"status,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// BackendAddressPool is pool of backend IP addresses. +type BackendAddressPool struct { + ID *string `json:"id,omitempty"` + *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// BackendAddressPoolPropertiesFormat is properties of the backend address +// pool. 
+type BackendAddressPoolPropertiesFormat struct { + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + OutboundNatRule *SubResource `json:"outboundNatRule,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// BgpSettings is +type BgpSettings struct { + Asn *int64 `json:"asn,omitempty"` + BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"` + PeerWeight *int32 `json:"peerWeight,omitempty"` +} + +// ConnectionResetSharedKey is +type ConnectionResetSharedKey struct { + autorest.Response `json:"-"` + KeyLength *int32 `json:"keyLength,omitempty"` +} + +// ConnectionSharedKey is response for GetConnectionSharedKey API service call +type ConnectionSharedKey struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// DhcpOptions is dhcpOptions contains an array of DNS servers available to +// VMs deployed in the virtual network. Standard DHCP option for a subnet +// overrides VNET DHCP options. +type DhcpOptions struct { + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// DNSNameAvailabilityResult is response for the CheckDnsNameAvailability API +// service call. +type DNSNameAvailabilityResult struct { + autorest.Response `json:"-"` + Available *bool `json:"available,omitempty"` +} + +// EffectiveNetworkSecurityGroup is effective network security group. +type EffectiveNetworkSecurityGroup struct { + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + Association *EffectiveNetworkSecurityGroupAssociation `json:"association,omitempty"` + EffectiveSecurityRules *[]EffectiveNetworkSecurityRule `json:"effectiveSecurityRules,omitempty"` +} + +// EffectiveNetworkSecurityGroupAssociation is the effective network security +// group association. 
+type EffectiveNetworkSecurityGroupAssociation struct { + Subnet *SubResource `json:"subnet,omitempty"` + NetworkInterface *SubResource `json:"networkInterface,omitempty"` +} + +// EffectiveNetworkSecurityGroupListResult is response for list effective +// network security groups API service call. +type EffectiveNetworkSecurityGroupListResult struct { + autorest.Response `json:"-"` + Value *[]EffectiveNetworkSecurityGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// EffectiveNetworkSecurityRule is effective network security rules. +type EffectiveNetworkSecurityRule struct { + Name *string `json:"name,omitempty"` + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + ExpandedSourceAddressPrefix *[]string `json:"expandedSourceAddressPrefix,omitempty"` + ExpandedDestinationAddressPrefix *[]string `json:"expandedDestinationAddressPrefix,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` +} + +// EffectiveRoute is effective Route +type EffectiveRoute struct { + Name *string `json:"name,omitempty"` + Source EffectiveRouteSource `json:"source,omitempty"` + State EffectiveRouteState `json:"state,omitempty"` + AddressPrefix *[]string `json:"addressPrefix,omitempty"` + NextHopIPAddress *[]string `json:"nextHopIpAddress,omitempty"` + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` +} + +// EffectiveRouteListResult is response for list effective route API service +// call. 
+type EffectiveRouteListResult struct { + autorest.Response `json:"-"` + Value *[]EffectiveRoute `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// Error is +type Error struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]ErrorDetails `json:"details,omitempty"` + InnerError *string `json:"innerError,omitempty"` +} + +// ErrorDetails is +type ErrorDetails struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// ExpressRouteCircuit is expressRouteCircuit resource +type ExpressRouteCircuit struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *ExpressRouteCircuitSku `json:"sku,omitempty"` + *ExpressRouteCircuitPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitArpTable is the ARP table associated with the +// ExpressRouteCircuit. +type ExpressRouteCircuitArpTable struct { + Age *int32 `json:"age,omitempty"` + Interface *string `json:"interface,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + MacAddress *string `json:"macAddress,omitempty"` +} + +// ExpressRouteCircuitAuthorization is authorization in an ExpressRouteCircuit +// resource. +type ExpressRouteCircuitAuthorization struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *AuthorizationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitListResult is response for ListExpressRouteCircuit API +// service call. 
+type ExpressRouteCircuitListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuit `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteCircuitListResult) ExpressRouteCircuitListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitPeering is peering in an ExpressRouteCircuit resource. +type ExpressRouteCircuitPeering struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *ExpressRouteCircuitPeeringPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ExpressRouteCircuitPeeringConfig is specifies the peering configuration. +type ExpressRouteCircuitPeeringConfig struct { + AdvertisedPublicPrefixes *[]string `json:"advertisedPublicPrefixes,omitempty"` + AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"` + CustomerASN *int32 `json:"customerASN,omitempty"` + RoutingRegistryName *string `json:"routingRegistryName,omitempty"` +} + +// ExpressRouteCircuitPeeringListResult is response for ListPeering API +// service call retrieves all peerings that belong to an ExpressRouteCircuit. +type ExpressRouteCircuitPeeringListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitPeering `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client ExpressRouteCircuitPeeringListResult) ExpressRouteCircuitPeeringListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteCircuitPeeringPropertiesFormat is +type ExpressRouteCircuitPeeringPropertiesFormat struct { + PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` + State ExpressRouteCircuitPeeringState `json:"state,omitempty"` + AzureASN *int32 `json:"azureASN,omitempty"` + PeerASN *int32 `json:"peerASN,omitempty"` + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` + PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` + SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + VlanID *int32 `json:"vlanId,omitempty"` + MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` + Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` +} + +// ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit. 
+type ExpressRouteCircuitPropertiesFormat struct { + AllowClassicOperations *bool `json:"allowClassicOperations,omitempty"` + CircuitProvisioningState *string `json:"circuitProvisioningState,omitempty"` + ServiceProviderProvisioningState ServiceProviderProvisioningState `json:"serviceProviderProvisioningState,omitempty"` + Authorizations *[]ExpressRouteCircuitAuthorization `json:"authorizations,omitempty"` + Peerings *[]ExpressRouteCircuitPeering `json:"peerings,omitempty"` + ServiceKey *string `json:"serviceKey,omitempty"` + ServiceProviderNotes *string `json:"serviceProviderNotes,omitempty"` + ServiceProviderProperties *ExpressRouteCircuitServiceProviderProperties `json:"serviceProviderProperties,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` +} + +// ExpressRouteCircuitRoutesTable is the routes table associated with the +// ExpressRouteCircuit +type ExpressRouteCircuitRoutesTable struct { + Network *string `json:"network,omitempty"` + NextHop *string `json:"nextHop,omitempty"` + LocPrf *string `json:"locPrf,omitempty"` + Weight *int32 `json:"weight,omitempty"` + Path *string `json:"path,omitempty"` +} + +// ExpressRouteCircuitRoutesTableSummary is the routes table associated with +// the ExpressRouteCircuit. +type ExpressRouteCircuitRoutesTableSummary struct { + Neighbor *string `json:"neighbor,omitempty"` + V *int32 `json:"v,omitempty"` + As *int32 `json:"as,omitempty"` + UpDown *string `json:"upDown,omitempty"` + StatePfxRcd *string `json:"statePfxRcd,omitempty"` +} + +// ExpressRouteCircuitsArpTableListResult is response for ListArpTable +// associated with the Express Route Circuits API. 
+type ExpressRouteCircuitsArpTableListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitArpTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitServiceProviderProperties is contains +// ServiceProviderProperties in an ExpressRouteCircuit. +type ExpressRouteCircuitServiceProviderProperties struct { + ServiceProviderName *string `json:"serviceProviderName,omitempty"` + PeeringLocation *string `json:"peeringLocation,omitempty"` + BandwidthInMbps *int32 `json:"bandwidthInMbps,omitempty"` +} + +// ExpressRouteCircuitSku is contains SKU in an ExpressRouteCircuit. +type ExpressRouteCircuitSku struct { + Name *string `json:"name,omitempty"` + Tier ExpressRouteCircuitSkuTier `json:"tier,omitempty"` + Family ExpressRouteCircuitSkuFamily `json:"family,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableListResult is response for ListRoutesTable +// associated with the Express Route Circuits API. +type ExpressRouteCircuitsRoutesTableListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitRoutesTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableSummaryListResult is response for +// ListRoutesTable associated with the Express Route Circuits API. +type ExpressRouteCircuitsRoutesTableSummaryListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteCircuitRoutesTableSummary `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitStats is contains stats associated with the peering. 
+type ExpressRouteCircuitStats struct { + autorest.Response `json:"-"` + PrimarybytesIn *int64 `json:"primarybytesIn,omitempty"` + PrimarybytesOut *int64 `json:"primarybytesOut,omitempty"` + SecondarybytesIn *int64 `json:"secondarybytesIn,omitempty"` + SecondarybytesOut *int64 `json:"secondarybytesOut,omitempty"` +} + +// ExpressRouteServiceProvider is a ExpressRouteResourceProvider object. +type ExpressRouteServiceProvider struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` +} + +// ExpressRouteServiceProviderBandwidthsOffered is contains bandwidths offered +// in ExpressRouteServiceProvider resources. +type ExpressRouteServiceProviderBandwidthsOffered struct { + OfferName *string `json:"offerName,omitempty"` + ValueInMbps *int32 `json:"valueInMbps,omitempty"` +} + +// ExpressRouteServiceProviderListResult is response for the +// ListExpressRouteServiceProvider API service call. +type ExpressRouteServiceProviderListResult struct { + autorest.Response `json:"-"` + Value *[]ExpressRouteServiceProvider `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ExpressRouteServiceProviderListResult) ExpressRouteServiceProviderListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// ExpressRouteServiceProviderPropertiesFormat is properties of +// ExpressRouteServiceProvider. 
+type ExpressRouteServiceProviderPropertiesFormat struct { + PeeringLocations *[]string `json:"peeringLocations,omitempty"` + BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// FrontendIPConfiguration is frontend IP address of the load balancer. +type FrontendIPConfiguration struct { + ID *string `json:"id,omitempty"` + *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// FrontendIPConfigurationPropertiesFormat is properties of Frontend IP +// Configuration of the load balancer. +type FrontendIPConfigurationPropertiesFormat struct { + InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"` + InboundNatPools *[]SubResource `json:"inboundNatPools,omitempty"` + OutboundNatRules *[]SubResource `json:"outboundNatRules,omitempty"` + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatPool is inbound NAT pool of the load balancer. +type InboundNatPool struct { + ID *string `json:"id,omitempty"` + *InboundNatPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InboundNatPoolPropertiesFormat is properties of Inbound NAT pool. 
+type InboundNatPoolPropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"` + FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatRule is inbound NAT rule of the load balancer. +type InboundNatRule struct { + ID *string `json:"id,omitempty"` + *InboundNatRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InboundNatRulePropertiesFormat is properties of the inbound NAT rule. +type InboundNatRulePropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendIPConfiguration *InterfaceIPConfiguration `json:"backendIPConfiguration,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + FrontendPort *int32 `json:"frontendPort,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Interface is a network interface in a resource group. +type Interface struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *InterfacePropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InterfaceDNSSettings is dNS settings of a network interface. 
+type InterfaceDNSSettings struct { + DNSServers *[]string `json:"dnsServers,omitempty"` + AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` + InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"` + InternalFqdn *string `json:"internalFqdn,omitempty"` + InternalDomainNameSuffix *string `json:"internalDomainNameSuffix,omitempty"` +} + +// InterfaceIPConfiguration is iPConfiguration in a network interface. +type InterfaceIPConfiguration struct { + ID *string `json:"id,omitempty"` + *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// InterfaceIPConfigurationPropertiesFormat is properties of IP configuration. +type InterfaceIPConfigurationPropertiesFormat struct { + ApplicationGatewayBackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"applicationGatewayBackendAddressPools,omitempty"` + LoadBalancerBackendAddressPools *[]BackendAddressPool `json:"loadBalancerBackendAddressPools,omitempty"` + LoadBalancerInboundNatRules *[]InboundNatRule `json:"loadBalancerInboundNatRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + Primary *bool `json:"primary,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InterfaceListResult is response for the ListNetworkInterface API service +// call. +type InterfaceListResult struct { + autorest.Response `json:"-"` + Value *[]Interface `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// InterfaceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client InterfaceListResult) InterfaceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// InterfacePropertiesFormat is networkInterface properties. +type InterfacePropertiesFormat struct { + VirtualMachine *SubResource `json:"virtualMachine,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + IPConfigurations *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"` + DNSSettings *InterfaceDNSSettings `json:"dnsSettings,omitempty"` + MacAddress *string `json:"macAddress,omitempty"` + Primary *bool `json:"primary,omitempty"` + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// IPAddressAvailabilityResult is response for CheckIPAddressAvailability API +// service call +type IPAddressAvailabilityResult struct { + autorest.Response `json:"-"` + Available *bool `json:"available,omitempty"` + AvailableIPAddresses *[]string `json:"availableIPAddresses,omitempty"` +} + +// IPConfiguration is iPConfiguration +type IPConfiguration struct { + ID *string `json:"id,omitempty"` + *IPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// IPConfigurationPropertiesFormat is properties of IP configuration. 
+type IPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancer is loadBalancer resource +type LoadBalancer struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *LoadBalancerPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LoadBalancerListResult is response for ListLoadBalancers API service call. +type LoadBalancerListResult struct { + autorest.Response `json:"-"` + Value *[]LoadBalancer `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client LoadBalancerListResult) LoadBalancerListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LoadBalancerPropertiesFormat is properties of the load balancer. 
+type LoadBalancerPropertiesFormat struct { + FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + BackendAddressPools *[]BackendAddressPool `json:"backendAddressPools,omitempty"` + LoadBalancingRules *[]LoadBalancingRule `json:"loadBalancingRules,omitempty"` + Probes *[]Probe `json:"probes,omitempty"` + InboundNatRules *[]InboundNatRule `json:"inboundNatRules,omitempty"` + InboundNatPools *[]InboundNatPool `json:"inboundNatPools,omitempty"` + OutboundNatRules *[]OutboundNatRule `json:"outboundNatRules,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancingRule is a load balancing rule for a load balancer. +type LoadBalancingRule struct { + ID *string `json:"id,omitempty"` + *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LoadBalancingRulePropertiesFormat is properties of the load balancer. 
+type LoadBalancingRulePropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + LoadDistribution LoadDistribution `json:"loadDistribution,omitempty"` + FrontendPort *int32 `json:"frontendPort,omitempty"` + BackendPort *int32 `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LocalNetworkGateway is a common class for general resource information +type LocalNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *LocalNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LocalNetworkGatewayListResult is response for ListLocalNetworkGateways API +// service call. +type LocalNetworkGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]LocalNetworkGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LocalNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client LocalNetworkGatewayListResult) LocalNetworkGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LocalNetworkGatewayPropertiesFormat is localNetworkGateway properties +type LocalNetworkGatewayPropertiesFormat struct { + LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"` + GatewayIPAddress *string `json:"gatewayIpAddress,omitempty"` + BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// OutboundNatRule is outbound NAT pool of the load balancer. +type OutboundNatRule struct { + ID *string `json:"id,omitempty"` + *OutboundNatRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// OutboundNatRulePropertiesFormat is outbound NAT pool of the load balancer. +type OutboundNatRulePropertiesFormat struct { + AllocatedOutboundPorts *int32 `json:"allocatedOutboundPorts,omitempty"` + FrontendIPConfigurations *[]SubResource `json:"frontendIPConfigurations,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Probe is a load balancer probe. 
+type Probe struct { + ID *string `json:"id,omitempty"` + *ProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ProbePropertiesFormat is +type ProbePropertiesFormat struct { + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + Protocol ProbeProtocol `json:"protocol,omitempty"` + Port *int32 `json:"port,omitempty"` + IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"` + NumberOfProbes *int32 `json:"numberOfProbes,omitempty"` + RequestPath *string `json:"requestPath,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// PublicIPAddress is public IP address resource. +type PublicIPAddress struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *PublicIPAddressPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// PublicIPAddressDNSSettings is contains the FQDN of the DNS record +// associated with the public IP address. +type PublicIPAddressDNSSettings struct { + DomainNameLabel *string `json:"domainNameLabel,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + ReverseFqdn *string `json:"reverseFqdn,omitempty"` +} + +// PublicIPAddressListResult is response for ListPublicIpAddresses API service +// call. +type PublicIPAddressListResult struct { + autorest.Response `json:"-"` + Value *[]PublicIPAddress `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// PublicIPAddressListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client PublicIPAddressListResult) PublicIPAddressListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// PublicIPAddressPropertiesFormat is public IP address properties. +type PublicIPAddressPropertiesFormat struct { + PublicIPAllocationMethod IPAllocationMethod `json:"publicIPAllocationMethod,omitempty"` + PublicIPAddressVersion IPVersion `json:"publicIPAddressVersion,omitempty"` + IPConfiguration *IPConfiguration `json:"ipConfiguration,omitempty"` + DNSSettings *PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceNavigationLink is resourceNavigationLink resource. +type ResourceNavigationLink struct { + ID *string `json:"id,omitempty"` + *ResourceNavigationLinkFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ResourceNavigationLinkFormat is properties of ResourceNavigationLink. 
+type ResourceNavigationLinkFormat struct { + LinkedResourceType *string `json:"linkedResourceType,omitempty"` + Link *string `json:"link,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Route is route resource +type Route struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *RoutePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// RouteListResult is response for the ListRoute API service call +type RouteListResult struct { + autorest.Response `json:"-"` + Value *[]Route `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client RouteListResult) RouteListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RoutePropertiesFormat is route resource +type RoutePropertiesFormat struct { + AddressPrefix *string `json:"addressPrefix,omitempty"` + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// RouteTable is route table resource. +type RouteTable struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *RouteTablePropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// RouteTableListResult is response for the ListRouteTable API service call. 
+type RouteTableListResult struct { + autorest.Response `json:"-"` + Value *[]RouteTable `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteTableListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client RouteTableListResult) RouteTableListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// RouteTablePropertiesFormat is route Table resource +type RouteTablePropertiesFormat struct { + Routes *[]Route `json:"routes,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityGroup is networkSecurityGroup resource. +type SecurityGroup struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *SecurityGroupPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SecurityGroupListResult is response for ListNetworkSecurityGroups API +// service call. +type SecurityGroupListResult struct { + autorest.Response `json:"-"` + Value *[]SecurityGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityGroupListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SecurityGroupPropertiesFormat is network Security Group resource. +type SecurityGroupPropertiesFormat struct { + SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` + DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"` + NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityRule is network security rule. +type SecurityRule struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *SecurityRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SecurityRuleListResult is response for ListSecurityRule API service call. +// Retrieves all security rules that belongs to a network security group. +type SecurityRuleListResult struct { + autorest.Response `json:"-"` + Value *[]SecurityRule `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SecurityRuleListResult) SecurityRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SecurityRulePropertiesFormat is +type SecurityRulePropertiesFormat struct { + Description *string `json:"description,omitempty"` + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// String is +type String struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// Subnet is subnet in a virtual network resource. +type Subnet struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *SubnetPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// SubnetListResult is response for ListSubnets API service callRetrieves all +// subnet that belongs to a virtual network +type SubnetListResult struct { + autorest.Response `json:"-"` + Value *[]Subnet `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SubnetListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SubnetPropertiesFormat is +type SubnetPropertiesFormat struct { + AddressPrefix *string `json:"addressPrefix,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + RouteTable *RouteTable `json:"routeTable,omitempty"` + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + ResourceNavigationLinks *[]ResourceNavigationLink `json:"resourceNavigationLinks,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// TunnelConnectionHealth is virtualNetworkGatewayConnection properties +type TunnelConnectionHealth struct { + Tunnel *string `json:"tunnel,omitempty"` + ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` + IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` + EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` + LastConnectionEstablishedUtcTime *string `json:"lastConnectionEstablishedUtcTime,omitempty"` +} + +// Usage is describes network resource usage. +type Usage struct { + Unit *string `json:"unit,omitempty"` + CurrentValue *int64 `json:"currentValue,omitempty"` + Limit *int64 `json:"limit,omitempty"` + Name *UsageName `json:"name,omitempty"` +} + +// UsageName is the usage names. +type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// UsagesListResult is the list usages operation response. 
+type UsagesListResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// UsagesListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client UsagesListResult) UsagesListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetwork is virtual Network resource. +type VirtualNetwork struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualNetworkPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGateway is a common class for general resource information +type VirtualNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnection is a common class for general resource +// information +type VirtualNetworkGatewayConnection struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualNetworkGatewayConnectionPropertiesFormat `json:"properties,omitempty"` + Etag *string 
`json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResult is response for the +// ListVirtualNetworkGatewayConnections API service call +type VirtualNetworkGatewayConnectionListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGatewayConnection `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayConnectionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkGatewayConnectionPropertiesFormat is +// virtualNetworkGatewayConnection properties +type VirtualNetworkGatewayConnectionPropertiesFormat struct { + AuthorizationKey *string `json:"authorizationKey,omitempty"` + VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"` + VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"` + LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"` + ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"` + RoutingWeight *int32 `json:"routingWeight,omitempty"` + SharedKey *string `json:"sharedKey,omitempty"` + ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"` + TunnelConnectionStatus *[]TunnelConnectionHealth `json:"tunnelConnectionStatus,omitempty"` + EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"` + IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"` + Peer *SubResource `json:"peer,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + 
ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayIPConfiguration is iP configuration for virtual +// network gateway +type VirtualNetworkGatewayIPConfiguration struct { + ID *string `json:"id,omitempty"` + *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of +// VirtualNetworkGatewayIPConfiguration +type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *SubResource `json:"subnet,omitempty"` + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayListResult is response for the +// ListVirtualNetworkGateways API service call. +type VirtualNetworkGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkGatewayListResult) VirtualNetworkGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkGatewayPropertiesFormat is virtualNetworkGateway properties +type VirtualNetworkGatewayPropertiesFormat struct { + IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"` + GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"` + VpnType VpnType `json:"vpnType,omitempty"` + EnableBgp *bool `json:"enableBgp,omitempty"` + ActiveActive *bool `json:"activeActive,omitempty"` + GatewayDefaultSite *SubResource `json:"gatewayDefaultSite,omitempty"` + Sku *VirtualNetworkGatewaySku `json:"sku,omitempty"` + VpnClientConfiguration *VpnClientConfiguration `json:"vpnClientConfiguration,omitempty"` + BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewaySku is virtualNetworkGatewaySku details +type VirtualNetworkGatewaySku struct { + Name VirtualNetworkGatewaySkuName `json:"name,omitempty"` + Tier VirtualNetworkGatewaySkuTier `json:"tier,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` +} + +// VirtualNetworkListResult is response for the ListVirtualNetworks API +// service call. +type VirtualNetworkListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetwork `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkPeering is peerings in a virtual network resource. +type VirtualNetworkPeering struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + *VirtualNetworkPeeringPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkPeeringListResult is response for ListSubnets API service +// call. Retrieves all subnets that belong to a virtual network. +type VirtualNetworkPeeringListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkPeering `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkPeeringListResult) VirtualNetworkPeeringListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkPeeringPropertiesFormat is +type VirtualNetworkPeeringPropertiesFormat struct { + AllowVirtualNetworkAccess *bool `json:"allowVirtualNetworkAccess,omitempty"` + AllowForwardedTraffic *bool `json:"allowForwardedTraffic,omitempty"` + AllowGatewayTransit *bool `json:"allowGatewayTransit,omitempty"` + UseRemoteGateways *bool `json:"useRemoteGateways,omitempty"` + RemoteVirtualNetwork *SubResource `json:"remoteVirtualNetwork,omitempty"` + PeeringState VirtualNetworkPeeringState `json:"peeringState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkPropertiesFormat is +type VirtualNetworkPropertiesFormat struct { + AddressSpace *AddressSpace `json:"addressSpace,omitempty"` + DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + VirtualNetworkPeerings *[]VirtualNetworkPeering `json:"virtualNetworkPeerings,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientConfiguration is vpnClientConfiguration for P2S client. 
+type VpnClientConfiguration struct { + VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` + VpnClientRootCertificates *[]VpnClientRootCertificate `json:"vpnClientRootCertificates,omitempty"` + VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` +} + +// VpnClientParameters is vpnClientParameters +type VpnClientParameters struct { + ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"` +} + +// VpnClientRevokedCertificate is vPN client revoked certificate of virtual +// network gateway. +type VpnClientRevokedCertificate struct { + ID *string `json:"id,omitempty"` + *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRevokedCertificatePropertiesFormat is properties of the revoked +// VPN client certificate of virtual network gateway. +type VpnClientRevokedCertificatePropertiesFormat struct { + Thumbprint *string `json:"thumbprint,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientRootCertificate is vPN client root certificate of virtual network +// gateway +type VpnClientRootCertificate struct { + ID *string `json:"id,omitempty"` + *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VpnClientRootCertificatePropertiesFormat is properties of SSL certificates +// of application gateway +type VpnClientRootCertificatePropertiesFormat struct { + PublicCertData *string `json:"publicCertData,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go new file mode 100644 index 000000000000..d8ef099a79cb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -0,0 +1,445 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// PublicIPAddressesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type PublicIPAddressesClient struct { + ManagementClient +} + +// NewPublicIPAddressesClient creates an instance of the +// PublicIPAddressesClient client. 
+func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { + return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPublicIPAddressesClientWithBaseURI creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { + return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a static or dynamic public IP address. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the public IP address. parameters is parameters supplied to +// the create or update public IP address operation. +func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, + }}, + {Target: 
"parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.Subnet.SubnetPropertiesFormat.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}, + }}, + }}, + {Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.PublicIPAddressesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified public IP address. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the subnet. +func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified public IP address in a specified resource group. +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the subnet. expand is expands referenced resources. 
+func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) { + req, err := client.GetPreparer(resourceGroupName, publicIPAddressName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result PublicIPAddress, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all public IP addresses in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all the public IP addresses in a subscription. +func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListAllResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.PublicIPAddressListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go new file mode 100644 index 000000000000..9aa1cf5054dc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -0,0 +1,335 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// RoutesClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type RoutesClient struct { + ManagementClient +} + +// NewRoutesClient creates an instance of the RoutesClient client. +func NewRoutesClient(subscriptionID string) RoutesClient { + return NewRoutesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoutesClientWithBaseURI creates an instance of the RoutesClient client. +func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesClient { + return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a route in the specified route table. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. routeName is the name of the route. +// routeParameters is parameters supplied to the create or update route +// operation. 
+func (client RoutesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, routeName, routeParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, routeName string, routeParameters Route, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithJSON(routeParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// 
CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified route from a route table. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. routeName is the name of the route. 
+func (client RoutesClient) Delete(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName, routeName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableName string, routeName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified route from a route table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. routeName is the name of the route. +func (client RoutesClient) Get(resourceGroupName string, routeTableName string, routeName string) (result Route, err error) { + req, err := client.GetPreparer(resourceGroupName, routeTableName, routeName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all routes in a route table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. 
+func (client RoutesClient) List(resourceGroupName string, routeTableName string) (result RouteListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, routeTableName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client RoutesClient) ListResponder(resp *http.Response) (result RouteListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client RoutesClient) ListNextResults(lastResults RouteListResult) (result RouteListResult, err error) { + req, err := lastResults.RouteListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go new file mode 100644 index 000000000000..b53e94ebaab4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -0,0 +1,423 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// RouteTablesClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type RouteTablesClient struct { + ManagementClient +} + +// NewRouteTablesClient creates an instance of the RouteTablesClient client. +func NewRouteTablesClient(subscriptionID string) RouteTablesClient { + return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteTablesClientWithBaseURI creates an instance of the +// RouteTablesClient client. +func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { + return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or updates a route table in a specified resource +// group. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
routeTableName is the +// name of the route table. parameters is parameters supplied to the create +// or update route table operation. +func (client RouteTablesClient) CreateOrUpdate(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.RouteTablesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, routeTableName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, routeTableName string, parameters RouteTable, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified route table. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. 
+// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. +func (client RouteTablesClient) Delete(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, routeTableName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTableName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified route table. +// +// resourceGroupName is the name of the resource group. routeTableName is the +// name of the route table. expand is expands referenced resources. +func (client RouteTablesClient) Get(resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) { + req, err := client.GetPreparer(resourceGroupName, routeTableName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+// NOTE(review): vendored, AutoRest-generated client code — do not hand-edit.
+// Fixes belong in the upstream generator/SDK; local changes will be lost when
+// the vendor tree is regenerated (see the generated-code header in this file).
+func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTableName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"routeTableName":    autorest.Encode("path", routeTableName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $expand is optional; only sent when the caller supplied a value.
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) GetResponder(resp *http.Response) (result RouteTable, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets all route tables in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client RouteTablesClient) List(resourceGroupName string) (result RouteTableListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) ListResponder(resp *http.Response) (result RouteTableListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client RouteTablesClient) ListNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) {
+	req, err := lastResults.RouteTableListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing next results request")
+	}
+	// A nil request with no error signals there is no further page —
+	// presumably the NextLink was empty; verify against
+	// RouteTableListResultPreparer. The zero-valued result is returned.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll gets all route tables in a subscription.
+func (client RouteTablesClient) ListAll() (result RouteTableListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client RouteTablesClient) ListAllResponder(resp *http.Response) (result RouteTableListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client RouteTablesClient) ListAllNextResults(lastResults RouteTableListResult) (result RouteTableListResult, err error) { + req, err := lastResults.RouteTableListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go new file mode 100644 index 000000000000..37898c4d089c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -0,0 +1,428 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// SecurityGroupsClient is the the Microsoft Azure Network management API
+// provides a RESTful set of web services that interact with Microsoft Azure
+// Networks service to manage your network resources. The API has entities
+// that capture the relationship between an end user and the Microsoft Azure
+// Networks service.
+// NOTE(review): vendored, AutoRest-generated code — do not hand-edit; changes
+// will be lost when the code is regenerated (see header above).
+type SecurityGroupsClient struct {
+	ManagementClient
+}
+
+// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient
+// client.
+func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient {
+	return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityGroupsClientWithBaseURI creates an instance of the
+// SecurityGroupsClient client.
+func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient {
+	return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a network security group in the specified
+// resource group. This method may poll for completion. Polling can be
+// canceled by passing the cancel channel argument. The channel will be used
+// to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// parameters is parameters supplied to the create or update network security
+// group operation.
+func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (result autorest.Response, err error) {
+	// Client-side validation: NetworkInterfaces and Subnets are read-only
+	// on the request payload and are rejected before any HTTP call.
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil},
+					{Target: "parameters.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil},
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.SecurityGroupsClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Delete deletes the specified network security group. This method may poll
+// for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets the specified network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group. expand
+// is expands referenced resources.
+func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) {
+	req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+	// $expand is optional; only sent when the caller supplied a value.
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets all network security groups in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing next results request")
+	}
+	// nil request without error: no further page — return zero-valued result.
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll gets all network security groups in a subscription.
+func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, err error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) { + req, err := lastResults.SecurityGroupListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go new file mode 100644 index 000000000000..9603c3c1fba2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -0,0 +1,350 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// SecurityRulesClient is the the Microsoft Azure Network management API
+// provides a RESTful set of web services that interact with Microsoft Azure
+// Networks service to manage your network resources. The API has entities
+// that capture the relationship between an end user and the Microsoft Azure
+// Networks service.
+// NOTE(review): vendored, AutoRest-generated code — do not hand-edit; changes
+// will be lost when the code is regenerated (see header above).
+type SecurityRulesClient struct {
+	ManagementClient
+}
+
+// NewSecurityRulesClient creates an instance of the SecurityRulesClient
+// client.
+func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient {
+	return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityRulesClientWithBaseURI creates an instance of the
+// SecurityRulesClient client.
+func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient {
+	return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a security rule in the specified network
+// security group. This method may poll for completion. Polling can be
+// canceled by passing the cancel channel argument. The channel will be used
+// to cancel polling and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule. securityRuleParameters
+// is parameters supplied to the create or update network security rule
+// operation.
+func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (result autorest.Response, err error) {
+	// Client-side validation: source and destination address prefixes are
+	// required on the rule payload and are rejected before any HTTP call.
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: securityRuleParameters,
+			Constraints: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat.SourceAddressPrefix", Name: validation.Null, Rule: true, Chain: nil},
+					{Target: "securityRuleParameters.SecurityRulePropertiesFormat.DestinationAddressPrefix", Name: validation.Null, Rule: true, Chain: nil},
+				}}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "network.SecurityRulesClient", "CreateOrUpdate")
+	}
+
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"securityRuleName":         autorest.Encode("path", securityRuleName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
+		autorest.WithJSON(securityRuleParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Delete deletes the specified network security rule. This method may poll
+// for completion. Polling can be canceled by passing the cancel channel
+// argument. The channel will be used to cancel polling and any outstanding
+// HTTP requests.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule.
+func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"securityRuleName":         autorest.Encode("path", securityRuleName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get get the specified network security rule.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule.
+func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) {
+	req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName),
+		"resourceGroupName":        autorest.Encode("path", resourceGroupName),
+		"securityRuleName":         autorest.Encode("path", securityRuleName),
+		"subscriptionId":           autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets all security rules in a network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, err error) {
+	req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client SecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, err error) { + req, err := lastResults.SecurityRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go new file mode 100644 index 000000000000..6230dbf03dc8 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -0,0 +1,357 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// SubnetsClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type SubnetsClient struct { + ManagementClient +} + +// NewSubnetsClient creates an instance of the SubnetsClient client. +func NewSubnetsClient(subscriptionID string) SubnetsClient { + return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client. +func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient { + return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a subnet in the specified virtual +// network. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. +// subnetParameters is parameters supplied to the create or update subnet +// operation. 
+func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: subnetParameters, + Constraints: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.NetworkInterfaces", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "subnetParameters.SubnetPropertiesFormat.NetworkSecurityGroup.SecurityGroupPropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "subnetParameters.SubnetPropertiesFormat.RouteTable", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "subnetParameters.SubnetPropertiesFormat.RouteTable.RouteTablePropertiesFormat.Subnets", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, + }}, + {Target: "subnetParameters.SubnetPropertiesFormat.IPConfigurations", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.SubnetsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters, cancel) + if err != nil { + return result, 
autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithJSON(subnetParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified subnet. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The +// channel will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. +func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified subnet by virtual network and resource group. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. +// expand is expands referenced resources. 
+func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all subnets in a virtual network. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. +func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SubnetsClient) ListResponder(resp *http.Response) (result SubnetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, err error) { + req, err := lastResults.SubnetListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go new file mode 100644 index 000000000000..016de70e1c30 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -0,0 +1,136 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// UsagesClient is the the Microsoft Azure Network management API provides a +// RESTful set of web services that interact with Microsoft Azure Networks +// service to manage your network resources. The API has entities that +// capture the relationship between an end user and the Microsoft Azure +// Networks service. +type UsagesClient struct { + ManagementClient +} + +// NewUsagesClient creates an instance of the UsagesClient client. +func NewUsagesClient(subscriptionID string) UsagesClient { + return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client. +func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { + return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists compute usages for a subscription. +// +// location is the location where resource usage is queried. 
+func (client UsagesClient) List(location string) (result UsagesListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.UsagesClient", "List") + } + + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsagesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsagesClient) ListResponder(resp *http.Response) (result UsagesListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client UsagesClient) ListNextResults(lastResults UsagesListResult) (result UsagesListResult, err error) { + req, err := lastResults.UsagesListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go new file mode 100644 index 000000000000..b0628fe0bd26 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -0,0 +1,43 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-09-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go new file mode 100644 index 000000000000..d58a9d326fac --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -0,0 +1,595 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualNetworkGatewayConnectionsClient is the the Microsoft Azure Network +// management API provides a RESTful set of web services that interact with +// Microsoft Azure Networks service to manage your network resources. The API +// has entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type VirtualNetworkGatewayConnectionsClient struct { + ManagementClient +} + +// NewVirtualNetworkGatewayConnectionsClient creates an instance of the +// VirtualNetworkGatewayConnectionsClient client. +func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of +// the VirtualNetworkGatewayConnectionsClient client. 
+func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a virtual network gateway connection in +// the specified resource group. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. parameters is parameters supplied to the create or +// update virtual network gateway connection operation. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, + {Target: 
"parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway1.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.VirtualNetworkGateway2.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat.LocalNetworkAddressSpace", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.LocalNetworkGateway2.LocalNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}, + }}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.ConnectionStatus", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.TunnelConnectionStatus", Name: 
validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.EgressBytesTransferred", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.IngressBytesTransferred", Name: validation.ReadOnly, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayConnectionPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified virtual network Gateway connection. 
This +// method may poll for completion. Polling can be canceled by passing the +// cancel channel argument. The channel will be used to cancel polling and +// any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified virtual network gateway connection by resource group. +// +// resourceGroupName is the name of the resource group. 
+// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation +// retrieves information about the specified virtual network gateway +// connection shared key through Network resource provider. +// +// resourceGroupName is the name of the resource group. 
+// virtualNetworkGatewayConnectionName is the virtual network gateway +// connection shared key name. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKey, err error) { + req, err := client.GetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request") + } + + resp, err := client.GetSharedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request") + } + + result, err = client.GetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure responding to request") + } + + return +} + +// GetSharedKeyPreparer prepares the GetSharedKey request. 
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSharedKeySender sends the GetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetSharedKeyResponder handles the response to the GetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List VirtualNetworkGatewayConnections operation retrieves all the +// virtual network gateways connections created. +// +// resourceGroupName is the name of the resource group. 
+func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. 
The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayConnectionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, err error) { + req, err := lastResults.VirtualNetworkGatewayConnectionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation +// resets the virtual network gateway connection shared key for passed +// virtual network gateway connection in the specified resource group through +// Network resource provider. This method may poll for completion. Polling +// can be canceled by passing the cancel channel argument. The channel will +// be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
+// virtualNetworkGatewayConnectionName is the virtual network gateway +// connection reset shared key Name. parameters is parameters supplied to the +// begin reset virtual network gateway connection shared key operation +// through network resource provider. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.KeyLength", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.KeyLength", Name: validation.InclusiveMaximum, Rule: 128, Chain: nil}, + {Target: "parameters.KeyLength", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey") + } + + req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request") + } + + resp, err := client.ResetSharedKeySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure sending request") + } + + result, err = client.ResetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", resp, "Failure responding to request") + } + + return +} + +// ResetSharedKeyPreparer prepares the ResetSharedKey request. 
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ResetSharedKeySender sends the ResetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation +// sets the virtual network gateway connection shared key for passed virtual +// network gateway connection in the specified resource group through Network +// resource provider. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the virtual network gateway +// connection name. parameters is parameters supplied to the Begin Set +// Virtual Network Gateway connection Shared key operation throughNetwork +// resource provider. 
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey") + } + + req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request") + } + + resp, err := client.SetSharedKeySender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure sending request") + } + + result, err = client.SetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", resp, "Failure responding to request") + } + + return +} + +// SetSharedKeyPreparer prepares the SetSharedKey request. 
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// SetSharedKeySender sends the SetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// SetSharedKeyResponder handles the response to the SetSharedKey request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go new file mode 100644 index 000000000000..2a6ce4772dfb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -0,0 +1,482 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualNetworkGatewaysClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resources. 
The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type VirtualNetworkGatewaysClient struct { + ManagementClient +} + +// NewVirtualNetworkGatewaysClient creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { + return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { + return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a virtual network gateway in the +// specified resource group. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be +// used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +// parameters is parameters supplied to create or update virtual network +// gateway operation. 
+func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayPropertiesFormat", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.VirtualNetworkGatewayPropertiesFormat.IPConfigurations", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualNetworkGatewayPropertiesFormat.ProvisioningState", Name: validation.ReadOnly, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified virtual network gateway. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Generatevpnclientpackage generates VPN client package for P2S client of the +// virtual network gateway in the specified resource group. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. 
+// parameters is parameters supplied to the generate virtual network gateway +// VPN client package operation. +func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result String, err error) { + req, err := client.GeneratevpnclientpackagePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + } + + resp, err := client.GeneratevpnclientpackageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure sending request") + } + + result, err = client.GeneratevpnclientpackageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", resp, "Failure responding to request") + } + + return +} + +// GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request. 
+func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets the specified virtual network gateway by resource group. 
+// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all virtual network gateways by resource group. +// +// resourceGroupName is the name of the resource group. +func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, err error) { + req, err := lastResults.VirtualNetworkGatewayListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// Reset resets the primary of the virtual network gateway in the specified +// resource group. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +// gatewayVip is virtual network gateway vip address supplied to the begin +// reset of the active-active feature enabled gateway. 
+func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, gatewayVip string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, gatewayVip, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request") + } + + resp, err := client.ResetSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure sending request") + } + + result, err = client.ResetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", resp, "Failure responding to request") + } + + return +} + +// ResetPreparer prepares the Reset request. +func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, gatewayVip string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(gatewayVip) > 0 { + queryParameters["gatewayVip"] = autorest.Encode("query", gatewayVip) + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ResetSender 
sends the Reset request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ResetResponder handles the response to the Reset request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go new file mode 100644 index 000000000000..bfed897a3bf9 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -0,0 +1,339 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualNetworkPeeringsClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type VirtualNetworkPeeringsClient struct { + ManagementClient +} + +// NewVirtualNetworkPeeringsClient creates an instance of the +// VirtualNetworkPeeringsClient client. +func NewVirtualNetworkPeeringsClient(subscriptionID string) VirtualNetworkPeeringsClient { + return NewVirtualNetworkPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkPeeringsClientWithBaseURI creates an instance of the +// VirtualNetworkPeeringsClient client. +func NewVirtualNetworkPeeringsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkPeeringsClient { + return VirtualNetworkPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a peering in the specified virtual +// network. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the peering. virtualNetworkPeeringParameters is parameters supplied to the +// create or update virtual network peering operation. 
+func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, virtualNetworkPeeringParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithJSON(virtualNetworkPeeringParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified virtual network peering. This method may poll +// for completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the virtual network peering. +func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified virtual network peering. +// +// resourceGroupName is the name of the resource group. 
virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the virtual network peering. +func (client VirtualNetworkPeeringsClient) Get(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (result VirtualNetworkPeering, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) GetResponder(resp *http.Response) (result VirtualNetworkPeering, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all virtual network peerings in a virtual network. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. 
+func (client VirtualNetworkPeeringsClient) List(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkPeeringListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualNetworkPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) ListResponder(resp *http.Response) (result VirtualNetworkPeeringListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualNetworkPeeringsClient) ListNextResults(lastResults VirtualNetworkPeeringListResult) (result VirtualNetworkPeeringListResult, err error) { + req, err := lastResults.VirtualNetworkPeeringListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go new file mode 100644 index 000000000000..9046437af2df --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -0,0 +1,484 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualNetworksClient is the the Microsoft Azure Network management API +// provides a RESTful set of web services that interact with Microsoft Azure +// Networks service to manage your network resources. The API has entities +// that capture the relationship between an end user and the Microsoft Azure +// Networks service. +type VirtualNetworksClient struct { + ManagementClient +} + +// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient +// client. +func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient { + return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworksClientWithBaseURI creates an instance of the +// VirtualNetworksClient client. 
+func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient { + return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckIPAddressAvailability checks whether a private IP address is available +// for use. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. ipAddress is the private IP address to be +// verified. +func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, ipAddress string) (result IPAddressAvailabilityResult, err error) { + req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, ipAddress) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckIPAddressAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request") + } + + result, err = client.CheckIPAddressAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckIPAddressAvailabilityPreparer prepares the CheckIPAddressAvailability request. 
+func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, ipAddress string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(ipAddress) > 0 { + queryParameters["ipAddress"] = autorest.Encode("query", ipAddress) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *http.Response) (result IPAddressAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate creates or updates a virtual network in the specified +// resource group. 
This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. parameters is parameters supplied to the +// create or update virtual network operation +func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes the specified virtual network. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. 
The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. +func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified virtual network by resource group. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. expand is expands referenced resources. +func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all virtual networks in a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, err error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { + req, err := lastResults.VirtualNetworkListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListAll gets all virtual networks in a subscription. 
+func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { + req, err := lastResults.VirtualNetworkListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to next results request") + } + + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go new file mode 100644 index 000000000000..c895c3a949f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -0,0 +1,715 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// AccountsClient is the the Storage Management Client. +type AccountsClient struct { + ManagementClient +} + +// NewAccountsClient creates an instance of the AccountsClient client. +func NewAccountsClient(subscriptionID string) AccountsClient { + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient +// client. +func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks that the storage account name is valid and is +// not already in use. +// +// accountName is the name of the storage account within the specified +// resource group. Storage account names must be between 3 and 24 characters +// in length and use numbers and lower-case letters only. 
+func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "CheckNameAvailability") + } + + req, err := client.CheckNameAvailabilityPreparer(accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
+func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), + autorest.WithJSON(accountName), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create asynchronously creates a new storage account with the specified +// parameters. If an account is already created and a subsequent create +// request is issued with different properties, the account properties will +// be updated. If an account is already created and a subsequent create or +// update request is issued with the exact same set of properties, the +// request will succeed. This method may poll for completion. 
Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +// parameters is the parameters to provide for the created account. +func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Sku.Tier", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, + {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters.Encryption", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.Services", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: 
"parameters.AccountPropertiesCreateParameters.Encryption.Services.Blob", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.Services.Blob.LastEnabledTime", Name: validation.ReadOnly, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.AccountPropertiesCreateParameters.Encryption.KeySource", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create") + } + + req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") + } + + resp, err := client.CreateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request") + } + + result, err = client.CreateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request") + } + + return +} + +// CreatePreparer prepares the Create request. 
+func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete deletes a storage account in Microsoft Azure. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. 
Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Delete") + } + + req, err := client.DeletePreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account +// including but not limited to name, SKU name, location, and account status. +// The ListKeys operation should be used to retrieve storage keys. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. 
Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "GetProperties") + } + + req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. 
+func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note +// that storage keys are not returned; use the ListKeys operation for this. 
+func (client AccountsClient) List() (result AccountListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AccountsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the +// given resource group. Note that storage keys are not returned; use the +// ListKeys operation for this. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys lists the access keys for the specified storage account. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. 
+func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListKeys") + } + + req, err := client.ListKeysPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. 
+func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKey regenerates one of the access keys for the specified storage +// account. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. 
+// regenerateKey is specifies name of the key which should be regenerated -- +// key1 or key2. +func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: regenerateKey, + Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "RegenerateKey") + } + + req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. 
+func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), + autorest.WithJSON(regenerateKey), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update the update operation can be used to update the SKU, encryption, +// access tier, or tags for a storage account. It can also be used to map the +// account to a custom domain. 
Only one custom domain is supported per +// storage account; the replacement/change of custom domain is not supported. +// In order to replace an old custom domain, the old value must be +// cleared/unregistered before a new value can be set. The update of multiple +// properties is supported. This call does not change the storage keys for +// the account. If you want to change the storage account keys, use the +// regenerate keys operation. The location and name of the storage account +// cannot be changed after creation. +// +// resourceGroupName is the name of the resource group within the user's +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +// parameters is the parameters to provide for the updated account. +func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Update") + } + + req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = 
autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go new file mode 100644 index 000000000000..68708dbf243e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -0,0 +1,58 @@ +// Package storage implements the Azure ARM Storage service API version +// 2016-01-01. +// +// The Storage Management Client. +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Storage + APIVersion = "2016-01-01" + + // DefaultBaseURI is the default URI used for the service Storage + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Storage. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go new file mode 100644 index 000000000000..bff65ec6eb5c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/models.go @@ -0,0 +1,299 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" +) + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // Cool specifies the cool state for access tier. + Cool AccessTier = "Cool" + // Hot specifies the hot state for access tier. + Hot AccessTier = "Hot" +) + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // Available specifies the available state for account status. + Available AccountStatus = "Available" + // Unavailable specifies the unavailable state for account status. + Unavailable AccountStatus = "Unavailable" +) + +// KeyPermission enumerates the values for key permission. +type KeyPermission string + +const ( + // FULL specifies the full state for key permission. + FULL KeyPermission = "FULL" + // READ specifies the read state for key permission. + READ KeyPermission = "READ" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // BlobStorage specifies the blob storage state for kind. + BlobStorage Kind = "BlobStorage" + // Storage specifies the storage state for kind. + Storage Kind = "Storage" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // ResolvingDNS specifies the resolving dns state for provisioning state. + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid specifies the account name invalid state for reason. 
+ AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists specifies the already exists state for reason. + AlreadyExists Reason = "AlreadyExists" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // PremiumLRS specifies the premium lrs state for sku name. + PremiumLRS SkuName = "Premium_LRS" + // StandardGRS specifies the standard grs state for sku name. + StandardGRS SkuName = "Standard_GRS" + // StandardLRS specifies the standard lrs state for sku name. + StandardLRS SkuName = "Standard_LRS" + // StandardRAGRS specifies the standard ragrs state for sku name. + StandardRAGRS SkuName = "Standard_RAGRS" + // StandardZRS specifies the standard zrs state for sku name. + StandardZRS SkuName = "Standard_ZRS" +) + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Premium specifies the premium state for sku tier. + Premium SkuTier = "Premium" + // Standard specifies the standard state for sku tier. + Standard SkuTier = "Standard" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Bytes specifies the bytes state for usage unit. + Bytes UsageUnit = "Bytes" + // BytesPerSecond specifies the bytes per second state for usage unit. + BytesPerSecond UsageUnit = "BytesPerSecond" + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" + // CountsPerSecond specifies the counts per second state for usage unit. + CountsPerSecond UsageUnit = "CountsPerSecond" + // Percent specifies the percent state for usage unit. + Percent UsageUnit = "Percent" + // Seconds specifies the seconds state for usage unit. + Seconds UsageUnit = "Seconds" +) + +// Account is the storage account. 
+type Account struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` + *AccountProperties `json:"properties,omitempty"` +} + +// AccountCheckNameAvailabilityParameters is +type AccountCheckNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters is the parameters used when creating a storage +// account. +type AccountCreateParameters struct { + Sku *Sku `json:"sku,omitempty"` + Kind Kind `json:"kind,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// AccountKey is an access key for the storage account. +type AccountKey struct { + KeyName *string `json:"keyName,omitempty"` + Value *string `json:"value,omitempty"` + Permissions KeyPermission `json:"permissions,omitempty"` +} + +// AccountListKeysResult is the response from the ListKeys operation. +type AccountListKeysResult struct { + autorest.Response `json:"-"` + Keys *[]AccountKey `json:"keys,omitempty"` +} + +// AccountListResult is the response from the List Storage Accounts operation. 
+type AccountListResult struct { + autorest.Response `json:"-"` + Value *[]Account `json:"value,omitempty"` +} + +// AccountProperties is +type AccountProperties struct { + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + PrimaryLocation *string `json:"primaryLocation,omitempty"` + StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` + LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` + SecondaryLocation *string `json:"secondaryLocation,omitempty"` + StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` + CreationTime *date.Time `json:"creationTime,omitempty"` + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountPropertiesCreateParameters is +type AccountPropertiesCreateParameters struct { + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountPropertiesUpdateParameters is +type AccountPropertiesUpdateParameters struct { + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + Encryption *Encryption `json:"encryption,omitempty"` + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountRegenerateKeyParameters is +type AccountRegenerateKeyParameters struct { + KeyName *string `json:"keyName,omitempty"` +} + +// AccountUpdateParameters is the parameters that can be provided when +// updating the storage account properties. 
+type AccountUpdateParameters struct { + Sku *Sku `json:"sku,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *AccountPropertiesUpdateParameters `json:"properties,omitempty"` +} + +// CheckNameAvailabilityResult is the CheckNameAvailability operation response. +type CheckNameAvailabilityResult struct { + autorest.Response `json:"-"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason Reason `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// CustomDomain is the custom domain assigned to this storage account. This +// can be set via Update. +type CustomDomain struct { + Name *string `json:"name,omitempty"` + UseSubDomain *bool `json:"useSubDomain,omitempty"` +} + +// Encryption is the encryption settings on the storage account. +type Encryption struct { + Services *EncryptionServices `json:"services,omitempty"` + KeySource *string `json:"keySource,omitempty"` +} + +// EncryptionService is a service that allows server-side encryption to be +// used. +type EncryptionService struct { + Enabled *bool `json:"enabled,omitempty"` + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` +} + +// EncryptionServices is a list of services that support encryption. +type EncryptionServices struct { + Blob *EncryptionService `json:"blob,omitempty"` +} + +// Endpoints is the URIs that are used to perform a retrieval of a public +// blob, queue, or table object. +type Endpoints struct { + Blob *string `json:"blob,omitempty"` + Queue *string `json:"queue,omitempty"` + Table *string `json:"table,omitempty"` + File *string `json:"file,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Sku is the SKU of the storage account. 
+type Sku struct { + Name SkuName `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` +} + +// Usage is describes Storage Resource Usage. +type Usage struct { + Unit UsageUnit `json:"unit,omitempty"` + CurrentValue *int32 `json:"currentValue,omitempty"` + Limit *int32 `json:"limit,omitempty"` + Name *UsageName `json:"name,omitempty"` +} + +// UsageListResult is the response from the List Usages operation. +type UsageListResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` +} + +// UsageName is the usage names that can be used; currently limited to +// StorageAccount. +type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go new file mode 100644 index 000000000000..866efc9c311d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go @@ -0,0 +1,101 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// UsageOperationsClient is the the Storage Management Client. +type UsageOperationsClient struct { + ManagementClient +} + +// NewUsageOperationsClient creates an instance of the UsageOperationsClient +// client. +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the current usage count and the limit for the resources under the +// subscription. +func (client UsageOperationsClient) List() (result UsageListResult, err error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.UsageOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go new file mode 100644 index 000000000000..e0a181c11a56 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -0,0 +1,43 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "storage", "2016-01-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 000000000000..0ab099848bba --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,5 @@ +# Azure Storage SDK for Go + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. 
For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. + +This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go new file mode 100644 index 000000000000..3dbaca52ade2 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -0,0 +1,1596 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client +} + +// A Container is an entry in ContainerListResponse. +type Container struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + // TODO (ahmetalpbalkan) remaining fields +} + +// ContainerListResponse contains the response fields from +// ListContainers call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Name string `xml:"Name"` + Properties BlobProperties `xml:"Properties"` + Metadata BlobMetadata `xml:"Metadata"` +} + +// BlobMetadata is a set of custom name/value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx +type BlobMetadata map[string]string + +type blobMetadataEntries struct { + Entries []blobMetadataEntry `xml:",any"` +} +type blobMetadataEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` +} + +// UnmarshalXML converts the xml:Metadata into Metadata map +func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var entries blobMetadataEntries + if err := d.DecodeElement(&entries, &start); err != nil { + return err + } + for _, entry := range entries.Entries { + if *bm == nil { + *bm = make(BlobMetadata) + } + (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value + } + return nil +} + +// MarshalXML implements the xml.Marshaler interface. It encodes +// metadata name/value pairs as they would appear in an Azure +// ListBlobs response. +func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + entries := make([]blobMetadataEntry, 0, len(bm)) + for k, v := range bm { + entries = append(entries, blobMetadataEntry{ + XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, + Value: v, + }) + } + return enc.EncodeElement(blobMetadataEntries{ + Entries: entries, + }, start) +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. 
+type BlobProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type"` + ContentEncoding string `xml:"Content-Encoding"` + CacheControl string `xml:"Cache-Control"` + ContentLanguage string `xml:"Cache-Language"` + BlobType BlobType `xml:"x-ms-blob-blob-type"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime string `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` +} + +// BlobHeaders contains various properties of a blob and is an entry +// in SetBlobProperties +type BlobHeaders struct { + ContentMD5 string `header:"x-ms-blob-content-md5"` + ContentLanguage string `header:"x-ms-blob-content-language"` + ContentEncoding string `header:"x-ms-blob-content-encoding"` + ContentType string `header:"x-ms-blob-content-type"` + CacheControl string `header:"x-ms-blob-cache-control"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. 
+ // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include string + MaxResults uint + Timeout uint +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +// BlobType defines the type of the Azure Blob. +type BlobType string + +// Types of page blobs +const ( + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" +) + +// PageWriteType defines the type updates that are going to be +// done on the page blob. 
+type PageWriteType string + +// Types of operations on page blobs +const ( + PageWriteTypeUpdate PageWriteType = "update" + PageWriteTypeClear PageWriteType = "clear" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + leaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. 
+type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessOptions are used when setting ACLs of containers (after creation) +type ContainerAccessOptions struct { + ContainerAccess ContainerAccessType + Timeout int + LeaseID string +} + +// AccessPolicyDetails are used for SETTING policies +type AccessPolicyDetails struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions is used when setting permissions and Access Policies for containers. +type ContainerPermissions struct { + AccessOptions ContainerAccessOptions + AccessPolicy AccessPolicyDetails +} + +// AccessPolicyDetailsXML has specifics about an access policy +// annotated with XML details. +type AccessPolicyDetailsXML struct { + StartTime time.Time `xml:"Start"` + ExpiryTime time.Time `xml:"Expiry"` + Permission string `xml:"Permission"` +} + +// SignedIdentifier is a wrapper for a specific policy +type SignedIdentifier struct { + ID string `xml:"Id"` + AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"` +} + +// SignedIdentifiers part of the response from GetPermissions call. +type SignedIdentifiers struct { + SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"` +} + +// AccessPolicy is the response type from the GetPermissions call. +type AccessPolicy struct { + SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"` +} + +// ContainerAccessResponse is returned for the GetContainerPermissions function. +// This contains both the permission and access policy for the container. 
+type ContainerAccessResponse struct { + ContainerAccess ContainerAccessType + AccessPolicy SignedIdentifiers +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 4 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// GetPageRangesResponse contains the reponse fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + var out ContainerListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// CreateContainer creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx +func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { + resp, err := b.createContainer(name, access) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateContainerIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. 
+func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { + resp, err := b.createContainer(name, access) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) { + verb := "PUT" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + if access != "" { + headers[ContainerAccessHeader] = string(access) + } + return b.client.exec(verb, uri, headers, nil) +} + +// ContainerExists returns true if a container with given name exists +// on the storage account, otherwise returns false. +func (b BlobStorageClient) ContainerExists(name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetContainerPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx +func (b BlobStorageClient) SetContainerPermissions(container string, containerPermissions ContainerPermissions) (err error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + + if containerPermissions.AccessOptions.Timeout > 0 { + params.Add("timeout", strconv.Itoa(containerPermissions.AccessOptions.Timeout)) + } + + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) + headers := 
b.client.getStandardHeaders() + if containerPermissions.AccessOptions.ContainerAccess != "" { + headers[ContainerAccessHeader] = string(containerPermissions.AccessOptions.ContainerAccess) + } + + if containerPermissions.AccessOptions.LeaseID != "" { + headers[leaseID] = containerPermissions.AccessOptions.LeaseID + } + + // generate the XML for the SharedAccessSignature if required. + accessPolicyXML, err := generateAccessPolicy(containerPermissions.AccessPolicy) + if err != nil { + return err + } + + var resp *storageResponse + if accessPolicyXML != "" { + headers["Content-Length"] = strconv.Itoa(len(accessPolicyXML)) + resp, err = b.client.exec("PUT", uri, headers, strings.NewReader(accessPolicyXML)) + } else { + resp, err = b.client.exec("PUT", uri, headers, nil) + } + + if err != nil { + return err + } + + if resp != nil { + defer func() { + err = resp.body.Close() + }() + + if resp.statusCode != http.StatusOK { + return errors.New("Unable to set permissions") + } + } + return nil +} + +// GetContainerPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will only be passed to Azure if populated +// Returns permissionResponse which is combined permissions and AccessPolicy +func (b BlobStorageClient) GetContainerPermissions(container string, timeout int, leaseID string) (permissionResponse *ContainerAccessResponse, err error) { + params := url.Values{"restype": {"container"}, + "comp": {"acl"}} + + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), params) + headers := b.client.getStandardHeaders() + + if leaseID != "" { + headers[leaseID] = leaseID + } + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + + // containerAccess. 
Blob, Container, empty + containerAccess := resp.headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + + defer func() { + err = resp.body.Close() + }() + + var out AccessPolicy + err = xmlUnmarshal(resp.body, &out.SignedIdentifiersList) + if err != nil { + return nil, err + } + + permissionResponse = &ContainerAccessResponse{} + permissionResponse.AccessPolicy = out.SignedIdentifiersList + permissionResponse.ContainerAccess = ContainerAccessType(containerAccess) + + return permissionResponse, nil +} + +// DeleteContainer deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainer(name string) error { + resp, err := b.deleteContainer(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteContainerIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx +func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { + resp, err := b.deleteContainer(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}}) + + headers := b.client.getStandardHeaders() + return b.client.exec(verb, uri, headers, nil) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q) + headers := b.client.getStandardHeaders() + + var out BlobListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// BlobExists returns true if a blob with given name exists on the specified +// container of the storage account. 
+func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetBlobURL gets the canonical URL to the blob with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the blob or container is private and this method does not check if the blob +// exists. +func (b BlobStorageClient) GetBlobURL(container, name string) string { + if container == "" { + container = "$root" + } + return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) +} + +// GetBlob returns a stream to read the blob. Caller must call Close() the +// reader to close on the underlying connection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, "", nil) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + return resp.body, nil +} + +// GetBlobRange reads the specified range of a blob to a stream. The bytesRange +// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx +func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) { + resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil { + return nil, err + } + return resp.body, nil +} + +func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) { + verb := "GET" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + if bytesRange != "" { + headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange) + } + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + return resp, err +} + +// leasePut is common PUT code for the various aquire/release/break etc functions. 
+func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx +func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) { + headers := b.client.getStandardHeaders() + params := url.Values{"comp": {"snapshot"}} + + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + for k, v := range extraHeaders { + headers[k] = v + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return nil, err + } + + snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot")) + if snapshotResponse != "" { + snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) + if err != nil { + return nil, err + } + + return &snapshotTimestamp, nil + } + + return nil, errors.New("Snapshot not created") +} + +// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// returns leaseID acquired +func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] 
= acquireLease + + if leaseTimeInSeconds > 0 { + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + } + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// Returns the timeout remaining in the lease in seconds +func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(container, name, headers) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
+// Returns the timeout remaining in the lease in seconds +func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(container, name, headers) +} + +// breakLeaseCommon is common code for both version of BreakLease (with and without break period) +func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +// Returns the new LeaseID acquired +func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) { + headers := b.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[leaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error { + headers := 
b.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[leaseID] = currentLeaseID + + _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error { + headers := b.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[leaseID] = currentLeaseID + + _, err := b.leaseCommonPut(container, name, headers, http.StatusOK) + if err != nil { + return err + } + + return nil +} + +// GetBlobProperties provides various information about the specified +// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) { + verb := "HEAD" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + resp, err := b.client.exec(verb, uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + var contentLength int64 + contentLengthStr := resp.headers.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return nil, err + } + } + + var sequenceNum int64 + sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number") + if sequenceNumStr != "" { + sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) + if err != nil { + return nil, err + } + } + + return &BlobProperties{ + LastModified: resp.headers.Get("Last-Modified"), + Etag: resp.headers.Get("Etag"), + ContentMD5: resp.headers.Get("Content-MD5"), + ContentLength: contentLength, + ContentEncoding: 
resp.headers.Get("Content-Encoding"), + ContentType: resp.headers.Get("Content-Type"), + CacheControl: resp.headers.Get("Cache-Control"), + ContentLanguage: resp.headers.Get("Content-Language"), + SequenceNumber: sequenceNum, + CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), + CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), + CopyID: resp.headers.Get("x-ms-copy-id"), + CopyProgress: resp.headers.Get("x-ms-copy-progress"), + CopySource: resp.headers.Get("x-ms-copy-source"), + CopyStatus: resp.headers.Get("x-ms-copy-status"), + BlobType: BlobType(resp.headers.Get("x-ms-blob-type")), + LeaseStatus: resp.headers.Get("x-ms-lease-status"), + }, nil +} + +// SetBlobProperties replaces the BlobHeaders for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx +func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { + params := url.Values{"comp": {"properties"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + extraHeaders := headersFromStruct(blobHeaders) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// SetBlobMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetBlobMetadata returns all user-defined metadata for the specified blob. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) { + params := url.Values{"comp": {"metadata"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + metadata := make(map[string]string) + for k, v := range resp.headers { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. 
"_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%d", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx +func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { + blockListXML := prepareBlockListRequest(blocks) + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) + headers := b.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + + resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML)) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { + params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + var out BlockListResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutPage writes a range of pages to a page blob or clears the given range. +// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned +// with 512-byte boundaries and chunk must be of size multiplies by 512. 
+// +// See https://msdn.microsoft.com/en-us/library/ee691975.aspx +func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-page-write"] = string(writeType) + headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte) + for k, v := range extraHeaders { + headers[k] = v + } + var contentLength int64 + var data io.Reader + if writeType == PageWriteTypeClear { + contentLength = 0 + data = bytes.NewReader([]byte{}) + } else { + contentLength = int64(len(chunk)) + data = bytes.NewReader(chunk) + } + headers["Content-Length"] = fmt.Sprintf("%v", contentLength) + + resp, err := b.client.exec("PUT", uri, headers, data) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetPageRanges returns the list of valid page ranges for a page blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}}) + headers := b.client.getStandardHeaders() + + var out GetPageRangesResponse + resp, err := b.client.exec("GET", uri, headers, nil) + if err != nil { + return out, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// PutAppendBlob initializes an empty append blob with specified name. 
An +// append blob must be created using this method before appending blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// AppendBlock appends a block to an append blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx +func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CopyBlob starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx +func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { + copyID, err := b.StartBlobCopy(container, name, sourceBlob) + if err != nil { + return err + } + + return b.WaitForBlobCopy(container, name, copyID) +} + +// StartBlobCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using GetBlobURL method.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx +func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + + headers := b.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return "", err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.headers.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
+// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx +func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error { + params := url.Values{"comp": {"copy"}, "copyid": {copyID}} + if timeout > 0 { + params.Add("timeout", strconv.Itoa(timeout)) + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + headers["x-ms-copy-action"] = "abort" + + if currentLeaseID != "" { + headers[leaseID] = currentLeaseID + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return nil +} + +// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error) +func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error { + for { + props, err := b.GetBlobProperties(container, name) + if err != nil { + return err + } + + if props.CopyID != copyID { + return errBlobCopyIDMismatch + } + + switch props.CopyStatus { + case blobCopyStatusSuccess: + return nil + case blobCopyStatusPending: + continue + case blobCopyStatusAborted: + return errBlobCopyAborted + case blobCopyStatusFailed: + return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription) + default: + return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) + } + } +} + +// DeleteBlob deletes the given blob from the specified container. +// If the blob does not exists at the time of the Delete Blob operation, it +// returns error. 
See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error { + resp, err := b.deleteBlob(container, name, extraHeaders) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteBlobIfExists deletes the given blob from the specified container If the +// blob is deleted with this call, returns true. Otherwise returns false. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx +func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) { + resp, err := b.deleteBlob(container, name, extraHeaders) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) { + verb := "DELETE" + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + for k, v := range extraHeaders { + headers[k] = v + } + + return b.client.exec(verb, uri, headers, nil) +} + +// helper method to construct the path to a container given its name +func pathForContainer(name string) string { + return fmt.Sprintf("/%s", name) +} + +// helper method to construct the path to a blob given its container and blob +// name +func pathForBlob(container, name string) string { + return fmt.Sprintf("/%s/%s", container, name) +} + +// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed procotols. 
+// If old API version is used but no signedIP is passed (ie empty string) then this should still work. +// We only populate the signedIP when it non-empty. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetBlobURL(container, name) + ) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + if err != nil { + return "", err + } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). 
+ canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + signedExpiry := expiry.UTC().Format(time.RFC3339) + signedResource := "b" + + protocols := "https,http" + if HTTPSOnly { + protocols = "https" + } + stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols) + if err != nil { + return "", err + } + + sig := b.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + if b.client.apiVersion >= "2015-04-05" { + sasParams.Add("spr", protocols) + if signedIPRange != "" { + sasParams.Add("sip", signedIPRange) + } + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +// GetBlobSASURI creates an URL to the specified blob which contains the Shared +// Access Signature with specified permissions and expiration time. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { + url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false) + return url, err +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} + +func generatePermissions(accessPolicy AccessPolicyDetails) (permissions string) { + // generate the permissions string (rwd). + // still want the end user API to have bool flags. + permissions = "" + + if accessPolicy.CanRead { + permissions += "r" + } + + if accessPolicy.CanWrite { + permissions += "w" + } + + if accessPolicy.CanDelete { + permissions += "d" + } + + return permissions +} + +// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the +// AccessPolicy struct which will get converted to XML. 
+func convertAccessPolicyToXMLStructs(accessPolicy AccessPolicyDetails) SignedIdentifiers { + return SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{ + { + ID: accessPolicy.ID, + AccessPolicy: AccessPolicyDetailsXML{ + StartTime: accessPolicy.StartTime.UTC().Round(time.Second), + ExpiryTime: accessPolicy.ExpiryTime.UTC().Round(time.Second), + Permission: generatePermissions(accessPolicy), + }, + }, + }, + } +} + +// generateAccessPolicy generates the XML access policy used as the payload for SetContainerPermissions. +func generateAccessPolicy(accessPolicy AccessPolicyDetails) (accessPolicyXML string, err error) { + + if accessPolicy.ID != "" { + signedIdentifiers := convertAccessPolicyToXMLStructs(accessPolicy) + body, _, err := xmlMarshal(signedIdentifiers) + if err != nil { + return "", err + } + + xmlByteArray, err := ioutil.ReadAll(body) + if err != nil { + return "", err + } + accessPolicyXML = string(xmlByteArray) + return accessPolicyXML, nil + } + + return "", nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go new file mode 100644 index 000000000000..eb0064e9e24f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/blob_test.go @@ -0,0 +1,1584 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "sync" + "testing" + "time" + + chk "gopkg.in/check.v1" +) + +type StorageBlobSuite struct{} + +var _ = chk.Suite(&StorageBlobSuite{}) + +const testContainerPrefix = "zzzztest-" + +func getBlobClient(c *chk.C) BlobStorageClient { + return getBasicClient(c).GetBlobService() +} + +func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { + c.Assert(pathForContainer("foo"), chk.Equals, "/foo") +} + +func (s *StorageBlobSuite) 
Test_pathForBlob(c *chk.C) { + c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") +} + +func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { + _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP", "", "") + c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 + + out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP", "", "") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") + + // check format for 2015-04-05 version + out, err = blobSASStringToSign("2015-04-05", "CS", "SE", "SP", "127.0.0.1", "https,http") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\n/blobCS\n\n127.0.0.1\nhttps,http\n2015-04-05\n\n\n\n\n") +} + +func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container/name", + RawQuery: url.Values{ + "sv": {"2013-08-15"}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := cli.GetBlobSASURI("container", "name", expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *StorageBlobSuite) TestGetBlobSASURIWithSignedIPAndProtocolValidAPIVersionPassed(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2015-04-05", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "/container/name", + RawQuery: url.Values{ + "sv": {"2015-04-05"}, + "sig": 
{"VBOYJmt89UuBRXrxNzmsCMoC+8PXX2yklV71QcL1BfM="}, + "sr": {"b"}, + "sip": {"127.0.0.1"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + "spr": {"https"}, + }.Encode()} + + u, err := cli.GetBlobSASURIWithSignedIPAndProtocol("container", "name", expiry, "r", "127.0.0.1", true) + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(sasParts.Query(), chk.DeepEquals, expectedParts.Query()) +} + +// Trying to use SignedIP and Protocol but using an older version of the API. +// Should ignore the signedIP/protocol and just use what the older version requires. +func (s *StorageBlobSuite) TestGetBlobSASURIWithSignedIPAndProtocolUsingOldAPIVersion(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "/container/name", + RawQuery: url.Values{ + "sv": {"2013-08-15"}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := cli.GetBlobSASURIWithSignedIPAndProtocol("container", "name", expiry, "r", "", true) + c.Assert(err, chk.IsNil) + sasParts, err := url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + blob := randNameWithSpecialChars(5) + body := []byte(randString(100)) + expiry := time.Now().UTC().Add(time.Hour) + permissions := "r" + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) + + sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) + c.Assert(err, 
chk.IsNil) + + resp, err := http.Get(sasURI) + c.Assert(err, chk.IsNil) + + blobResp, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + c.Assert(err, chk.IsNil) + + c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) + c.Assert(len(blobResp), chk.Equals, len(body)) +} + +func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) { + cli := getBlobClient(c) + c.Assert(deleteTestContainers(cli), chk.IsNil) + + const n = 5 + const pageSize = 2 + + // Create test containers + created := []string{} + for i := 0; i < n; i++ { + name := randContainer() + c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) + created = append(created, name) + } + sort.Strings(created) + + // Defer test container deletions + defer func() { + var wg sync.WaitGroup + for _, cnt := range created { + wg.Add(1) + go func(name string) { + c.Assert(cli.DeleteContainer(name), chk.IsNil) + wg.Done() + }(cnt) + } + wg.Wait() + }() + + // Paginate results + seen := []string{} + marker := "" + for { + resp, err := cli.ListContainers(ListContainersParameters{ + Prefix: testContainerPrefix, + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + containers := resp.Containers + if len(containers) > pageSize { + c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(containers)) + } + + for _, c := range containers { + seen = append(seen, c.Name) + } + + marker = resp.NextMarker + if marker == "" || len(containers) == 0 { + break + } + } + + c.Assert(seen, chk.DeepEquals, created) +} + +func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + ok, err := cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + ok, err = cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + c.Assert(cli.DeleteContainer(cnt), chk.IsNil) +} + +func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + defer cli.DeleteContainer(cnt) + + // First create + ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + + // Nonexisting container + c.Assert(cli.DeleteContainer(cnt), chk.NotNil) + + ok, err := cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Existing container + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + ok, err = cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) 
TestBlobExists(c *chk.C) { + cnt := randContainer() + blob := randName(5) + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) + defer cli.DeleteBlob(cnt, blob, nil) + + ok, err := cli.BlobExists(cnt, blob+".foo") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + ok, err = cli.BlobExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { + api, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + + c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") + c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") + c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") +} + +func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + cnt := randContainer() + src := randName(5) + dst := randName(5) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src, nil) + + c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil) + defer cli.DeleteBlob(cnt, dst, nil) + + blobBody, err := cli.GetBlob(cnt, dst) + c.Assert(err, chk.IsNil) + + b, err := ioutil.ReadAll(blobBody) + defer blobBody.Close() + c.Assert(err, chk.IsNil) + c.Assert(b, chk.DeepEquals, body) +} + +func (s *StorageBlobSuite) TestStartBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := 
getBlobClient(c) + cnt := randContainer() + src := randName(5) + dst := randName(5) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src, nil) + + // given we dont know when it will start, can we even test destination creation? + // will just test that an error wasn't thrown for now. + copyID, err := cli.StartBlobCopy(cnt, dst, cli.GetBlobURL(cnt, src)) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) +} + +// Tests abort of blobcopy. Given the blobcopy is usually over before we can actually trigger an abort +// it is agreed that we perform a copy then try and perform an abort. It should result in a HTTP status of 409. +// So basically we're testing negative scenario (as good as we can do for now) +func (s *StorageBlobSuite) TestAbortBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + cnt := randContainer() + src := randName(5) + dst := randName(5) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src, nil) + + // given we dont know when it will start, can we even test destination creation? + // will just test that an error wasn't thrown for now. + copyID, err := cli.StartBlobCopy(cnt, dst, cli.GetBlobURL(cnt, src)) + c.Assert(copyID, chk.NotNil) + c.Assert(err, chk.IsNil) + + err = cli.WaitForBlobCopy(cnt, dst, copyID) + c.Assert(err, chk.IsNil) + + // abort abort abort, but we *know* its already completed. 
+ err = cli.AbortBlobCopy(cnt, dst, copyID, "", 0) + + // abort should fail (over already) + c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusConflict) +} + +func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { + cnt := randContainer() + blob := randName(5) + + cli := getBlobClient(c) + c.Assert(cli.DeleteBlob(cnt, blob, nil), chk.NotNil) + + ok, err := cli.DeleteBlobIfExists(cnt, blob, nil) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestDeleteBlobWithConditions(c *chk.C) { + cnt := randContainer() + blob := randName(5) + + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + oldProps, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + // Update metadata, so Etag changes + c.Assert(cli.SetBlobMetadata(cnt, blob, map[string]string{}, nil), chk.IsNil) + newProps, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + // "Delete if matches old Etag" should fail without deleting. + err = cli.DeleteBlob(cnt, blob, map[string]string{ + "If-Match": oldProps.Etag, + }) + c.Assert(err, chk.FitsTypeOf, AzureStorageServiceError{}) + c.Assert(err.(AzureStorageServiceError).StatusCode, chk.Equals, http.StatusPreconditionFailed) + _, err = cli.GetBlob(cnt, blob) + c.Assert(err, chk.IsNil) + + // "Delete if matches new Etag" should succeed. 
+ err = cli.DeleteBlob(cnt, blob, map[string]string{ + "If-Match": newProps.Etag, + }) + c.Assert(err, chk.IsNil) + _, err = cli.GetBlob(cnt, blob) + c.Assert(err, chk.Not(chk.IsNil)) +} + +func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { + cnt := randContainer() + blob := randName(5) + contents := randString(64) + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Nonexisting blob + _, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.NotNil) + + // Put the blob + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) + + // Get blob properties + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) + c.Assert(props.ContentType, chk.Equals, "application/octet-stream") + c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) +} + +func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + blobs := []string{} + const n = 5 + const pageSize = 2 + for i := 0; i < n; i++ { + name := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + blobs = append(blobs, name) + } + sort.Strings(blobs) + + // Paginate + seen := []string{} + marker := "" + for { + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + for _, v := range resp.Blobs { + seen = append(seen, v.Name) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Blobs) == 0 { + break + } + } + + // Compare + c.Assert(seen, chk.DeepEquals, blobs) +} + +// listBlobsAsFiles is a helper function to list blobs as "folders" and "files". 
+func listBlobsAsFiles(cli BlobStorageClient, cnt string, parentDir string) (folders []string, files []string, err error) { + var blobParams ListBlobsParameters + var blobListResponse BlobListResponse + + // Top level "folders" + blobParams = ListBlobsParameters{ + Delimiter: "/", + Prefix: parentDir, + } + + blobListResponse, err = cli.ListBlobs(cnt, blobParams) + if err != nil { + return nil, nil, err + } + + // These are treated as "folders" under the parentDir. + folders = blobListResponse.BlobPrefixes + + // "Files"" are blobs which are under the parentDir. + files = make([]string, len(blobListResponse.Blobs)) + for i := range blobListResponse.Blobs { + files[i] = blobListResponse.Blobs[i].Name + } + + return folders, files, nil +} + +// TestListBlobsTraversal tests that we can correctly traverse +// blobs in blob storage as if it were a file system by using +// a combination of Prefix, Delimiter, and BlobPrefixes. +// +// Blob storage is flat, but we can *simulate* the file +// system with folders and files using conventions in naming. +// With the blob namedd "/usr/bin/ls", when we use delimiter '/', +// the "ls" would be a "file"; with "/", /usr" and "/usr/bin" being +// the "folders" +// +// NOTE: The use of delimiter (eg forward slash) is extremely fiddly +// and difficult to get right so some discipline in naming and rules +// when using the API is required to get everything to work as expected. +// +// Assuming our delimiter is a forward slash, the rules are: +// +// - Do use a leading forward slash in blob names to make things +// consistent and simpler (see further). +// Note that doing so will show "" as the only top-level +// folder in the container in Azure portal, which may look strange. +// +// - The "folder names" are returned *with trailing forward slash* as per MSDN. +// +// - The "folder names" will be "absolue paths", e.g. listing things under "/usr/" +// will return folder names "/usr/bin/". 
+// +// - The "file names" are returned as full blob names, e.g. when listing +// things under "/usr/bin/", the file names will be "/usr/bin/ls" and +// "/usr/bin/cat". +// +// - Everything is returned with case-sensitive order as expected in real file system +// as per MSDN. +// +// - To list things under a "folder" always use trailing forward slash. +// +// Example: to list top level folders we use root folder named "" with +// trailing forward slash, so we use "/". +// +// Example: to list folders under "/usr", we again append forward slash and +// so we use "/usr/". +// +// Because we use leading forward slash we don't need to have different +// treatment of "get top-level folders" and "get non-top-level folders" +// scenarios. +func (s *StorageBlobSuite) TestListBlobsTraversal(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Note use of leading forward slash as per naming rules. + blobsToCreate := []string{ + "/usr/bin/ls", + "/usr/bin/cat", + "/usr/lib64/libc.so", + "/etc/hosts", + "/etc/init.d/iptables", + } + + // Create the above blobs + for _, blobName := range blobsToCreate { + err := cli.CreateBlockBlob(cnt, blobName) + c.Assert(err, chk.IsNil) + } + + var folders []string + var files []string + var err error + + // Top level folders and files. + folders, files, err = listBlobsAsFiles(cli, cnt, "/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/", "/usr/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /etc/. Note use of trailing forward slash here as per rules. 
+ folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/etc/init.d/"}) + c.Assert(files, chk.DeepEquals, []string{"/etc/hosts"}) + + // Things under /etc/init.d/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/etc/init.d/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/etc/init.d/iptables"}) + + // Things under /usr/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string{"/usr/bin/", "/usr/lib64/"}) + c.Assert(files, chk.DeepEquals, []string{}) + + // Things under /usr/bin/ + folders, files, err = listBlobsAsFiles(cli, cnt, "/usr/bin/") + c.Assert(err, chk.IsNil) + c.Assert(folders, chk.DeepEquals, []string(nil)) + c.Assert(files, chk.DeepEquals, []string{"/usr/bin/cat", "/usr/bin/ls"}) +} + +func (s *StorageBlobSuite) TestListBlobsWithMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + expectMeta := make(map[string]BlobMetadata) + + // Put 4 blobs with metadata + for i := 0; i < 4; i++ { + name := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + c.Assert(cli.SetBlobMetadata(cnt, name, map[string]string{ + "Foo": name, + "Bar_BAZ": "Waz Qux", + }, nil), chk.IsNil) + expectMeta[name] = BlobMetadata{ + "foo": name, + "bar_baz": "Waz Qux", + } + } + + // Put one more blob with no metadata + blobWithoutMetadata := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blobWithoutMetadata, []byte("Hello, world!")), chk.IsNil) + expectMeta[blobWithoutMetadata] = nil + + // Get ListBlobs with include:"metadata" + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: 5, + Include: "metadata"}) + c.Assert(err, chk.IsNil) + + respBlobs := 
make(map[string]Blob) + for _, v := range resp.Blobs { + respBlobs[v.Name] = v + } + + // Verify the metadata is as expected + for name := range expectMeta { + c.Check(respBlobs[name].Metadata, chk.DeepEquals, expectMeta[name]) + } +} + +// Ensure it's possible to generate a ListBlobs response with +// metadata, e.g., for a stub server. +func (s *StorageBlobSuite) TestMarshalBlobMetadata(c *chk.C) { + buf, err := xml.Marshal(Blob{ + Name: randName(5), + Properties: BlobProperties{}, + Metadata: BlobMetadata{"foo": "baz < waz"}, + }) + c.Assert(err, chk.IsNil) + c.Assert(string(buf), chk.Matches, `.*baz < waz.*`) +} + +func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + m, err := cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 0) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPut, nil) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mPut) + + // Case munging + + mPutUpper := map[string]string{ + "Foo": "different bar", + "bar_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "foo": "different bar", + "bar_baz": "different waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPutUpper, nil) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mExpectLower) +} + +func (s *StorageBlobSuite) TestSetMetadataWithExtraHeaders(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer 
cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + extraHeaders := map[string]string{ + "If-Match": "incorrect-etag", + } + + // Set with incorrect If-Match in extra headers should result in error + err := cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + c.Assert(err, chk.NotNil) + + props, err := cli.GetBlobProperties(cnt, blob) + extraHeaders = map[string]string{ + "If-Match": props.Etag, + } + + // Set with matching If-Match in extra headers should succeed + err = cli.SetBlobMetadata(cnt, blob, mPut, extraHeaders) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestSetBlobProperties(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + mPut := BlobHeaders{ + CacheControl: "private, max-age=0, no-cache", + ContentMD5: "oBATU+oaDduHWbVZLuzIJw==", + ContentType: "application/json", + ContentEncoding: "gzip", + ContentLanguage: "de-DE", + } + + err := cli.SetBlobProperties(cnt, blob, mPut) + c.Assert(err, chk.IsNil) + + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + c.Check(mPut.CacheControl, chk.Equals, props.CacheControl) + c.Check(mPut.ContentType, chk.Equals, props.ContentType) + c.Check(mPut.ContentMD5, chk.Equals, props.ContentMD5) + c.Check(mPut.ContentEncoding, chk.Equals, props.ContentEncoding) + c.Check(mPut.ContentLanguage, chk.Equals, props.ContentLanguage) +} + +func (s *StorageBlobSuite) createContainerPermissions(accessType ContainerAccessType, + timeout int, leaseID string, ID string, canRead bool, + canWrite bool, canDelete bool) ContainerPermissions { + perms := ContainerPermissions{} + perms.AccessOptions.ContainerAccess = accessType + 
perms.AccessOptions.Timeout = timeout + perms.AccessOptions.LeaseID = leaseID + + if ID != "" { + perms.AccessPolicy.ID = ID + perms.AccessPolicy.StartTime = time.Now() + perms.AccessPolicy.ExpiryTime = time.Now().Add(time.Hour * 10) + perms.AccessPolicy.CanRead = canRead + perms.AccessPolicy.CanWrite = canWrite + perms.AccessPolicy.CanDelete = canDelete + } + + return perms +} + +func (s *StorageBlobSuite) TestSetContainerPermissionsWithTimeoutSuccessfully(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + perms := s.createContainerPermissions(ContainerAccessTypeBlob, 30, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) + + err := cli.SetContainerPermissions(cnt, perms) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestSetContainerPermissionsSuccessfully(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) + + err := cli.SetContainerPermissions(cnt, perms) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestSetThenGetContainerPermissionsSuccessfully(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTa=", true, true, true) + + err := cli.SetContainerPermissions(cnt, perms) + c.Assert(err, chk.IsNil) + + returnedPerms, err := cli.GetContainerPermissions(cnt, 0, "") + c.Assert(err, chk.IsNil) + + // check container permissions itself. 
+ c.Assert(returnedPerms.ContainerAccess, chk.Equals, perms.AccessOptions.ContainerAccess) + + // now check policy set. + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers, chk.HasLen, 1) + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].ID, chk.Equals, perms.AccessPolicy.ID) + + // test timestamps down the second + // rounding start/expiry time original perms since the returned perms would have been rounded. + // so need rounded vs rounded. + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.StartTime.Round(time.Second).Format(time.RFC1123), chk.Equals, perms.AccessPolicy.StartTime.Round(time.Second).Format(time.RFC1123)) + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.ExpiryTime.Round(time.Second).Format(time.RFC1123), chk.Equals, perms.AccessPolicy.ExpiryTime.Round(time.Second).Format(time.RFC1123)) + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers[0].AccessPolicy.Permission, chk.Equals, "rwd") +} + +func (s *StorageBlobSuite) TestSetContainerPermissionsOnlySuccessfully(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "", true, true, true) + + err := cli.SetContainerPermissions(cnt, perms) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestSetThenGetContainerPermissionsOnlySuccessfully(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + perms := s.createContainerPermissions(ContainerAccessTypeBlob, 0, "", "", true, true, true) + + err := cli.SetContainerPermissions(cnt, perms) + c.Assert(err, chk.IsNil) + + returnedPerms, err := cli.GetContainerPermissions(cnt, 0, "") + c.Assert(err, chk.IsNil) + + // check container permissions itself. 
+ c.Assert(returnedPerms.ContainerAccess, chk.Equals, perms.AccessOptions.ContainerAccess) + + // now check there are NO policies set + c.Assert(returnedPerms.AccessPolicy.SignedIdentifiers, chk.HasLen, 0) +} + +func (s *StorageBlobSuite) TestSnapshotBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, nil) + c.Assert(err, chk.IsNil) + c.Assert(snapshotTime, chk.NotNil) +} + +func (s *StorageBlobSuite) TestSnapshotBlobWithTimeout(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + snapshotTime, err := cli.SnapshotBlob(cnt, blob, 30, nil) + c.Assert(err, chk.IsNil) + c.Assert(snapshotTime, chk.NotNil) +} + +func (s *StorageBlobSuite) TestSnapshotBlobWithValidLease(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + // generate lease. 
+ currentLeaseID, err := cli.AcquireLease(cnt, blob, 30, "") + c.Assert(err, chk.IsNil) + + extraHeaders := map[string]string{ + leaseID: currentLeaseID, + } + + snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, extraHeaders) + c.Assert(err, chk.IsNil) + c.Assert(snapshotTime, chk.NotNil) +} + +func (s *StorageBlobSuite) TestSnapshotBlobWithInvalidLease(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + // generate lease. + _, err := cli.AcquireLease(cnt, blob, 30, "") + c.Assert(err, chk.IsNil) + c.Assert(leaseID, chk.NotNil) + + extraHeaders := map[string]string{ + leaseID: "718e3c89-da3d-4201-b616-dd794b0bd7c1", + } + + snapshotTime, err := cli.SnapshotBlob(cnt, blob, 0, extraHeaders) + c.Assert(err, chk.NotNil) + c.Assert(snapshotTime, chk.IsNil) +} + +func (s *StorageBlobSuite) TestAcquireLeaseWithNoProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + _, err := cli.AcquireLease(cnt, blob, 30, "") + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestAcquireLeaseWithProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + c.Assert(leaseID, chk.Equals, proposedLeaseID) +} + +func (s *StorageBlobSuite) 
TestAcquireLeaseWithBadProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + proposedLeaseID := "badbadbad" + _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.NotNil) +} + +func (s *StorageBlobSuite) TestRenewLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + err = cli.RenewLease(cnt, blob, leaseID) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestRenewLeaseAgainstNoCurrentLease(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + badLeaseID := "1f812371-a41d-49e6-b123-f4b542e85144" + err := cli.RenewLease(cnt, blob, badLeaseID) + c.Assert(err, chk.NotNil) +} + +func (s *StorageBlobSuite) TestChangeLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + newProposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fbb" + 
newLeaseID, err := cli.ChangeLease(cnt, blob, leaseID, newProposedLeaseID) + c.Assert(err, chk.IsNil) + c.Assert(newLeaseID, chk.Equals, newProposedLeaseID) +} + +func (s *StorageBlobSuite) TestChangeLeaseNotSuccessfulbadProposedLeaseID(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + newProposedLeaseID := "1f812371-a41d-49e6-b123-f4b542e" + _, err = cli.ChangeLease(cnt, blob, leaseID, newProposedLeaseID) + c.Assert(err, chk.NotNil) +} + +func (s *StorageBlobSuite) TestReleaseLeaseSuccessful(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + leaseID, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + err = cli.ReleaseLease(cnt, blob, leaseID) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestReleaseLeaseNotSuccessfulBadLeaseID(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + err = cli.ReleaseLease(cnt, blob, "badleaseid") + c.Assert(err, chk.NotNil) +} + +func (s *StorageBlobSuite) TestBreakLeaseSuccessful(c *chk.C) { 
+ cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + proposedLeaseID := "dfe6dde8-68d5-4910-9248-c97c61768fea" + _, err := cli.AcquireLease(cnt, blob, 30, proposedLeaseID) + c.Assert(err, chk.IsNil) + + _, err = cli.BreakLease(cnt, blob) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Not(chk.Equals), 0) +} + +func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) { + cnt := randContainer() + blob := randName(5) + body := "0123456789" + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil) + defer cli.DeleteBlob(cnt, blob, nil) + + // Read 1-3 + for _, r := range []struct { + rangeStr string + expected string + }{ + {"0-", body}, + {"1-3", body[1 : 3+1]}, + {"3-", body[3:]}, + } { + resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr, nil) + c.Assert(err, chk.IsNil) + blobBody, err := ioutil.ReadAll(resp) + c.Assert(err, chk.IsNil) + + str := string(blobBody) + c.Assert(str, chk.Equals, r.expected) + } +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randName(5) + data := randBytes(8888) + 
c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data), nil), chk.IsNil) + + body, err := cli.GetBlob(cnt, name) + c.Assert(err, chk.IsNil) + gotData, err := ioutil.ReadAll(body) + body.Close() + + c.Assert(err, chk.IsNil) + c.Assert(gotData, chk.DeepEquals, data) +} + +func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + name := randName(5) + data := randBytes(8888) + err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data), nil) + c.Assert(err, chk.Not(chk.IsNil)) + + _, err = cli.GetBlob(cnt, name) + // Upload was incomplete: blob should not have been created. + c.Assert(err, chk.Not(chk.IsNil)) +} + +func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) +} + +func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + + // Put one block + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) + defer cli.deleteBlob(cnt, blob, nil) + + // Get committed blocks + committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) + c.Assert(err, chk.IsNil) + + if len(committed.CommittedBlocks) > 0 { + c.Fatal("There are committed blocks") + } + + // Get uncommitted blocks + uncommitted, 
err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) + c.Assert(err, chk.IsNil) + + c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) + // Commit block list + c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) + + // Get all blocks + all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(all.CommittedBlocks), chk.Equals, 1) + c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) + + // Verify the block + thatBlock := all.CommittedBlocks[0] + c.Assert(thatBlock.Name, chk.Equals, blockID) + c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) +} + +func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + + // Verify + blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) + c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) +} + +func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + size := int64(10 * 1024 * 1024) + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, size) + c.Assert(props.BlobType, chk.Equals, BlobTypePage) +} + +func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + size := int64(10 * 1024 * 1024) // larger 
than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append chunks + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1, nil), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() + + // Overwrite first half of chunk1 + chunk0 := []byte(randString(512)) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0, nil), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) +} + +func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Put 0-2047 + chunk := []byte(randString(2048)) + c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk, nil), chk.IsNil) + + // Clear 512-1023 + c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, "0-2047", nil) + c.Assert(err, chk.IsNil) + contents, err := 
ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + defer out.Close() + c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) +} + +func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size, nil), chk.IsNil) + + // Get page ranges on empty blob + out, err := cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 0) + + // Add 0-512 page + c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512)), nil), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 1) + + // Add 1024-2048 + c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024)), nil), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 2) +} + +func (s *StorageBlobSuite) TestPutAppendBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, int64(0)) + c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) +} + +func (s *StorageBlobSuite) TestPutAppendBlobAppendBlocks(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randName(5) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) 
+ + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append first block + c.Assert(cli.AppendBlock(cnt, blob, chunk1, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + out.Close() + + // Append second block + c.Assert(cli.AppendBlock(cnt, blob, chunk2, nil), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() +} + +func deleteTestContainers(cli BlobStorageClient) error { + for { + resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) + if err != nil { + return err + } + if len(resp.Containers) == 0 { + break + } + for _, c := range resp.Containers { + err = cli.DeleteContainer(c.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { + if len(chunk) > MaxBlobBlockSize { + return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +func (s *StorageBlobSuite) TestPutAppendBlobSpecialChars(c 
*chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randNameWithSpecialChars(5) + c.Assert(cli.PutAppendBlob(cnt, blob, nil), chk.IsNil) + + // Verify metadata + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, int64(0)) + c.Assert(props.BlobType, chk.Equals, BlobTypeAppend) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append first block + c.Assert(cli.AppendBlock(cnt, blob, chunk1, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, chunk1) + out.Close() + + // Append second block + c.Assert(cli.AppendBlock(cnt, blob, chunk2, nil), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1), nil) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() +} + +func randContainer() string { + return testContainerPrefix + randString(32-len(testContainerPrefix)) +} + +func randString(n int) string { + if n <= 0 { + panic("negative number") + } + const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} + +func randBytes(n int) []byte { + data := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + panic(err) + } + return data +} + +func randName(n int) string { + name := randString(n) + "/" + randString(n) + return name +} + +func 
randNameWithSpecialChars(n int) string { + name := randString(n) + "/" + randString(n) + "-._~:?#[]@!$&'()*,;+= " + randString(n) + return name +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go new file mode 100644 index 000000000000..77528511a47a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -0,0 +1,552 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests when a + // default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. + DefaultAPIVersion = "2015-02-21" + + defaultUseHTTPS = true + + // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountName = "devstoreaccount1" + + // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" + + storageEmulatorBlob = "127.0.0.1:10000" + storageEmulatorTable = "127.0.0.1:10002" + storageEmulatorQueue = "127.0.0.1:10001" +) + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + // HTTPClient is the http.Client used to initiate API + // requests. If it is nil, http.DefaultClient is used. 
+ HTTPClient *http.Client + + accountName string + accountKey []byte + useHTTPS bool + baseURL string + apiVersion string +} + +type storageResponse struct { + statusCode int + headers http.Header + body io.ReadCloser +} + +type odataResponse struct { + storageResponse + odata odataErrorMessage +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. +type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + StatusCode int + RequestID string +} + +type odataErrorMessageMessage struct { + Lang string `json:"lang"` + Value string `json:"value"` +} + +type odataErrorMessageInternal struct { + Code string `json:"code"` + Message odataErrorMessageMessage `json:"message"` +} + +type odataErrorMessage struct { + Err odataErrorMessageInternal `json:"odata.error"` +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by Azure. 
+func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// NewBasicClient constructs a Client with given storage service name and +// key. +func NewBasicClient(accountName, accountKey string) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } + return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) + +} + +//NewEmulatorClient contructs a Client intended to only work with Azure +//Storage Emulator +func NewEmulatorClient() (Client, error) { + return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) +} + +// NewClient constructs a Client. This should be used if the caller wants +// to specify whether to use HTTPS, a specific REST API version or a custom +// storage endpoint than Azure Public Cloud. +func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) { + var c Client + if accountName == "" { + return c, fmt.Errorf("azure: account name required") + } else if accountKey == "" { + return c, fmt.Errorf("azure: account key required") + } else if blobServiceBaseURL == "" { + return c, fmt.Errorf("azure: base storage service url required") + } + + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return c, fmt.Errorf("azure: malformed storage account key: %v", err) + } + + return Client{ + accountName: accountName, + accountKey: key, + useHTTPS: useHTTPS, + baseURL: blobServiceBaseURL, + apiVersion: apiVersion, + }, nil +} + +func (c Client) getBaseURL(service string) string { + scheme := "http" + if c.useHTTPS { + scheme = "https" + } + host := "" + if c.accountName == StorageEmulatorAccountName { + switch service { + case blobServiceName: + host = storageEmulatorBlob + case tableServiceName: + host = storageEmulatorTable + case queueServiceName: + host = storageEmulatorQueue + } + } else { + host = fmt.Sprintf("%s.%s.%s", 
c.accountName, service, c.baseURL) + } + + u := &url.URL{ + Scheme: scheme, + Host: host} + return u.String() +} + +func (c Client) getEndpoint(service, path string, params url.Values) string { + u, err := url.Parse(c.getBaseURL(service)) + if err != nil { + // really should not be happening + panic(err) + } + + // API doesn't accept path segments not starting with '/' + if !strings.HasPrefix(path, "/") { + path = fmt.Sprintf("/%v", path) + } + + if c.accountName == StorageEmulatorAccountName { + path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) + } + + u.Path = path + u.RawQuery = params.Encode() + return u.String() +} + +// GetBlobService returns a BlobStorageClient which can operate on the blob +// service of the storage account. +func (c Client) GetBlobService() BlobStorageClient { + return BlobStorageClient{c} +} + +// GetQueueService returns a QueueServiceClient which can operate on the queue +// service of the storage account. +func (c Client) GetQueueService() QueueServiceClient { + return QueueServiceClient{c} +} + +// GetTableService returns a TableServiceClient which can operate on the table +// service of the storage account. +func (c Client) GetTableService() TableServiceClient { + return TableServiceClient{c} +} + +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
+func (c Client) GetFileService() FileServiceClient { + return FileServiceClient{c} +} + +func (c Client) createAuthorizationHeader(canonicalizedString string) string { + signature := c.computeHmac256(canonicalizedString) + return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) +} + +func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { + canonicalizedResource, err := c.buildCanonicalizedResource(url) + if err != nil { + return "", err + } + + canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource) + return c.createAuthorizationHeader(canonicalizedString), nil +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func (c Client) buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + match, _ := regexp.MatchString("x-ms-", headerName) + if match { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := "" + + for i, key := range keys { + if i == len(keys)-1 { + ch += fmt.Sprintf("%s:%s", key, cm[key]) + } else { + ch += fmt.Sprintf("%s:%s\n", key, cm[key]) + } + } + return ch +} + +func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { + errMsg := "buildCanonicalizedResourceTable error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + 
c.getCanonicalizedAccountName() + + if len(u.Path) > 0 { + cr += u.EscapedPath() + } + + return cr, nil +} + +func (c Client) buildCanonicalizedResource(uri string) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := "/" + c.getCanonicalizedAccountName() + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr += u.EscapedPath() + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + if len(params) > 0 { + cr += "\n" + keys := make([]string, 0, len(params)) + for key := range params { + keys = append(keys, key) + } + + sort.Strings(keys) + + for i, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + if i == len(keys)-1 { + cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")) + } else { + cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ",")) + } + } + } + + return cr, nil +} + +func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { + contentLength := headers["Content-Length"] + if contentLength == "0" { + contentLength = "" + } + canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", + verb, + headers["Content-Encoding"], + headers["Content-Language"], + contentLength, + headers["Content-MD5"], + headers["Content-Type"], + headers["Date"], + headers["If-Modified-Since"], + headers["If-Match"], + headers["If-None-Match"], + headers["If-Unmodified-Since"], + headers["Range"], + c.buildCanonicalizedHeader(headers), + canonicalizedResource) + + return canonicalizedString +} + +func (c Client) exec(verb, url string, headers map[string]string, body 
io.Reader) (*storageResponse, error) { + authHeader, err := c.getAuthorizationHeader(verb, url, headers) + if err != nil { + return nil, err + } + headers["Authorization"] = authHeader + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + if clstr, ok := headers["Content-Length"]; ok { + // content length header is being signed, but completely ignored by golang. + // instead we have to use the ContentLength property on the request struct + // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and + // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49) + req.ContentLength, err = strconv.ParseInt(clstr, 10, 64) + if err != nil { + return nil, err + } + } + for k, v := range headers { + req.Header.Add(k, v) + } + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status) + } else { + // response contains storage service error object, unmarshal + storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) + if err != nil { // error unmarshaling the error response + err = errIn + } + err = storageErr + } + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */ + }, err + } + + return &storageResponse{ + statusCode: resp.StatusCode, + headers: resp.Header, + body: resp.Body}, nil +} + +func (c Client) execInternalJSON(verb, url 
string, headers map[string]string, body io.Reader) (*odataResponse, error) { + req, err := http.NewRequest(verb, url, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + httpClient := c.HTTPClient + if httpClient == nil { + httpClient = http.DefaultClient + } + + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + + respToRet := &odataResponse{} + respToRet.body = resp.Body + respToRet.statusCode = resp.StatusCode + respToRet.headers = resp.Header + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + // no error in response body + err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode) + return respToRet, err + } + // try unmarshal as odata.error json + err = json.Unmarshal(respBody, &respToRet.odata) + return respToRet, err + } + + return respToRet, nil +} + +func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) { + can, err := c.buildCanonicalizedResourceTable(url) + + if err != nil { + return "", err + } + strToSign := headers["x-ms-date"] + "\n" + can + + hmac := c.computeHmac256(strToSign) + return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil +} + +func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) { + var err error + headers["Authorization"], err = c.createSharedKeyLite(url, headers) + if err != nil { + return nil, err + } + + return c.execInternalJSON(verb, url, headers, body) +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) { + var storageErr AzureStorageServiceError + if 
err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + return storageErr, nil +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusError if the given response code is not +// one of the allowed status codes; otherwise nil. +func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go new file mode 100644 index 000000000000..038299fd942f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/client_test.go @@ -0,0 +1,231 @@ +package storage + +import ( + "encoding/base64" + "net/url" + "os" + "testing" + + chk "gopkg.in/check.v1" +) + +// Hook up gocheck to testing +func Test(t *testing.T) { chk.TestingT(t) } + +type StorageClientSuite struct{} + +var _ = chk.Suite(&StorageClientSuite{}) + +// getBasicClient returns a test client from storage credentials in the env +func getBasicClient(c *chk.C) Client { + name := os.Getenv("ACCOUNT_NAME") + if name == "" { + c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") + } + key := os.Getenv("ACCOUNT_KEY") + if key == "" { + c.Fatal("ACCOUNT_KEY not set") + } + cli, err := NewBasicClient(name, key) + c.Assert(err, chk.IsNil) + return cli +} + +//getEmulatorClient returns a test client for Azure Storeage Emulator +func 
getEmulatorClient(c *chk.C) Client { + cli, err := NewBasicClient(StorageEmulatorAccountName, "") + c.Assert(err, chk.IsNil) + return cli +} + +func (s *StorageClientSuite) TestNewEmulatorClient(c *chk.C) { + cli, err := NewBasicClient(StorageEmulatorAccountName, "") + c.Assert(err, chk.IsNil) + c.Assert(cli.accountName, chk.Equals, StorageEmulatorAccountName) + expectedKey, err := base64.StdEncoding.DecodeString(StorageEmulatorAccountKey) + c.Assert(err, chk.IsNil) + c.Assert(cli.accountKey, chk.DeepEquals, expectedKey) +} + +func (s *StorageClientSuite) TestMalformedKeyError(c *chk.C) { + _, err := NewBasicClient("foo", "malformed") + c.Assert(err, chk.ErrorMatches, "azure: malformed storage account key: .*") +} + +func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) + c.Assert(err, chk.IsNil) + c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") +} + +func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { + apiVersion := "2015-01-01" // a non existing one + cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, apiVersion) + c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") +} + +func (s *StorageClientSuite) TestGetBaseURL_StorageEmulator(c *chk.C) { + cli, err := NewBasicClient(StorageEmulatorAccountName, StorageEmulatorAccountKey) + c.Assert(err, chk.IsNil) + + type test struct{ service, expected string } + tests := []test{ + {blobServiceName, "http://127.0.0.1:10000"}, + {tableServiceName, "http://127.0.0.1:10002"}, + {queueServiceName, "http://127.0.0.1:10001"}, + } + for _, i := range tests { + baseURL := cli.getBaseURL(i.service) + c.Assert(baseURL, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) TestGetEndpoint_None(c 
*chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") +} + +func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "path", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") +} + +func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") +} + +func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "path", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") +} + +func (s *StorageClientSuite) TestGetEndpoint_StorageEmulator(c *chk.C) { + cli, err := NewBasicClient(StorageEmulatorAccountName, StorageEmulatorAccountKey) + c.Assert(err, chk.IsNil) + + type test struct{ service, expected string } + tests := []test{ + {blobServiceName, "http://127.0.0.1:10000/devstoreaccount1/"}, + {tableServiceName, "http://127.0.0.1:10002/devstoreaccount1/"}, + {queueServiceName, "http://127.0.0.1:10001/devstoreaccount1/"}, + } + for _, i := range tests { + endpoint := cli.getEndpoint(i.service, "", url.Values{}) + c.Assert(endpoint, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + headers := cli.getStandardHeaders() + 
c.Assert(len(headers), chk.Equals, 2) + c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion) + if _, ok := headers["x-ms-date"]; !ok { + c.Fatal("Missing date header") + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedResourceTable(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct{ url, expected string } + tests := []test{ + {"https://foo.table.core.windows.net/mytable", "/foo/mytable"}, + {"https://foo.table.core.windows.net/mytable(PartitionKey='pkey',RowKey='rowkey%3D')", "/foo/mytable(PartitionKey='pkey',RowKey='rowkey%3D')"}, + } + + for _, i := range tests { + out, err := cli.buildCanonicalizedResourceTable(i.url) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct{ url, expected string } + tests := []test{ + {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, + {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, + {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, + {"https://foo.blob.core.windows.net/cnt/bl ob", "/foo/cnt/bl%20ob"}, + {"https://foo.blob.core.windows.net/c nt/blob", "/foo/c%20nt/blob"}, + {"https://foo.blob.core.windows.net/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A blob", "/foo/cnt/blob%3F%23%5B%5D%21$&%27%28%29%2A%20blob"}, + {"https://foo.blob.core.windows.net/cnt/blob-._~:,@;+=blob", "/foo/cnt/blob-._~:,@;+=blob"}, + {"https://foo.blob.core.windows.net/c nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob", "/foo/c%20nt/blob-._~:%3F%23%5B%5D@%21$&%27%28%29%2A,;+=/blob"}, + } + + for _, i := range tests { + out, err := cli.buildCanonicalizedResource(i.url) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { + cli, err := 
NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct { + headers map[string]string + expected string + } + tests := []test{ + {map[string]string{}, ""}, + {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{"foo:": "bar"}, ""}, + {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{ + "x-ms-version": "9999-99-99", + "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + + for _, i := range tests { + c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { + // attempt to delete a nonexisting container + _, err := getBlobClient(c).deleteContainer(randContainer()) + c.Assert(err, chk.NotNil) + + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ContainerNotFound") + c.Assert(v.Code, chk.Not(chk.Equals), "") +} + +func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) { + key := base64.StdEncoding.EncodeToString([]byte("bar")) + cli, err := NewBasicClient("foo", key) + c.Assert(err, chk.IsNil) + + canonicalizedString := `foobarzoo` + expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` + c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 000000000000..f679395bde27 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,878 @@ +package storage + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// FileServiceClient contains operations 
for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// A Share is an entry in ShareListResponse. +type Share struct { + Name string `xml:"Name"` + Properties ShareProperties `xml:"Properties"` +} + +// A Directory is an entry in DirsAndFilesListResponse. +type Directory struct { + Name string `xml:"Name"` +} + +// A File is an entry in DirsAndFilesListResponse. +type File struct { + Name string `xml:"Name"` + Properties FileProperties `xml:"Properties"` +} + +// ShareProperties contains various properties of a share returned from +// various endpoints like ListShares. +type ShareProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + Quota string `xml:"Quota"` +} + +// DirectoryProperties contains various properties of a directory returned +// from various endpoints like GetDirectoryProperties. +type DirectoryProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` +} + +// FileProperties contains various properties of a file returned from +// various endpoints like ListDirsAndFiles. +type FileProperties struct { + CacheControl string `header:"x-ms-cache-control"` + ContentLength uint64 `xml:"Content-Length"` + ContentType string `header:"x-ms-content-type"` + CopyCompletionTime string + CopyID string + CopySource string + CopyProgress string + CopyStatusDesc string + CopyStatus string + Disposition string `header:"x-ms-content-disposition"` + Encoding string `header:"x-ms-content-encoding"` + Etag string + Language string `header:"x-ms-content-language"` + LastModified string + MD5 string `header:"x-ms-content-md5"` +} + +// FileStream contains file data returned from a call to GetFile. +type FileStream struct { + Body io.ReadCloser + Properties *FileProperties + Metadata map[string]string +} + +// ShareListResponse contains the response fields from +// ListShares call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// DirsAndFilesListResponse contains the response fields from +// a List Files and Directories call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +type DirsAndFilesListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Marker string `xml:"Marker"` + MaxResults int64 `xml:"MaxResults"` + Directories []Directory `xml:"Entries>Directory"` + Files []File `xml:"Entries>File"` + NextMarker string `xml:"NextMarker"` +} + +// FileRanges contains a list of file range information for a file. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +type FileRanges struct { + ContentLength uint64 + LastModified string + ETag string + FileRanges []FileRange `xml:"Range"` +} + +// FileRange contains range information for a file. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +type FileRange struct { + Start uint64 `xml:"Start"` + End uint64 `xml:"End"` +} + +// ListDirsAndFilesParameters defines the set of customizable parameters to +// make a List Files and Directories call. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +type ListDirsAndFilesParameters struct { + Marker string + MaxResults uint + Timeout uint +} + +// ShareHeaders contains various properties of a file and is an entry +// in SetShareProperties +type ShareHeaders struct { + Quota string `header:"x-ms-share-quota"` +} + +type compType string + +const ( + compNone compType = "" + compList compType = "list" + compMetadata compType = "metadata" + compProperties compType = "properties" + compRangeList compType = "rangelist" +) + +func (ct compType) String() string { + return string(ct) +} + +type resourceType string + +const ( + resourceDirectory resourceType = "directory" + resourceFile resourceType = "" + resourceShare resourceType = "share" +) + +func (rt resourceType) String() string { + return string(rt) +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +func (p ListDirsAndFilesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.MaxResults != 0 { + out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults)) + } + if p.Timeout != 0 { + out.Set("timeout", fmt.Sprintf("%v", p.Timeout)) + } + + return out +} + +func (fr FileRange) String() string { + return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) +} + +// ToPathSegment returns the URL path segment for the specified values +func ToPathSegment(parts ...string) string { + join := strings.Join(parts, "/") + if join[0] != '/' { + join = fmt.Sprintf("/%s", join) + } + return join +} + +// returns url.Values for the specified types +func 
getURLInitValues(comp compType, res resourceType) url.Values { + values := url.Values{} + if comp != compNone { + values.Set("comp", comp.String()) + } + if res != resourceFile { + values.Set("restype", res.String()) + } + return values +} + +// ListDirsAndFiles returns a list of files or directories under the specified share or +// directory. It also contains a pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx +func (f FileServiceClient) ListDirsAndFiles(path string, params ListDirsAndFilesParameters) (DirsAndFilesListResponse, error) { + q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) + + var out DirsAndFilesListResponse + resp, err := f.listContent(path, q, nil) + if err != nil { + return out, err + } + + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// ListFileRanges returns the list of valid ranges for a file. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx +func (f FileServiceClient) ListFileRanges(path string, listRange *FileRange) (FileRanges, error) { + params := url.Values{"comp": {"rangelist"}} + + // add optional range to list + var headers map[string]string + if listRange != nil { + headers = make(map[string]string) + headers["Range"] = listRange.String() + } + + var out FileRanges + resp, err := f.listContent(path, params, headers) + if err != nil { + return out, err + } + + defer resp.body.Close() + var cl uint64 + cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64) + if err != nil { + return out, err + } + + out.ContentLength = cl + out.ETag = resp.headers.Get("ETag") + out.LastModified = resp.headers.Get("Last-Modified") + + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + + var out ShareListResponse + resp, err := f.listContent("", q, nil) + if err != nil { + return out, err + } + + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &out) + return out, err +} + +// retrieves directory or share content +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodGet, uri, headers, nil) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + resp.body.Close() + return nil, err + } + + return resp, nil +} + +// CreateDirectory operation creates a new directory with optional metadata in the +// specified share. If a directory with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx +func (f FileServiceClient) CreateDirectory(path string, metadata map[string]string) error { + return f.createResource(path, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) +} + +// CreateFile operation creates a new file with optional metadata or replaces an existing one. +// Note that this only initializes the file, call PutRange to add content. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx +func (f FileServiceClient) CreateFile(path string, maxSize uint64, metadata map[string]string) error { + extraHeaders := map[string]string{ + "x-ms-content-length": strconv.FormatUint(maxSize, 10), + "x-ms-type": "file", + } + return f.createResource(path, resourceFile, mergeMDIntoExtraHeaders(metadata, extraHeaders)) +} + +// ClearRange releases the specified range of space in storage. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx +func (f FileServiceClient) ClearRange(path string, fileRange FileRange) error { + return f.modifyRange(path, nil, fileRange) +} + +// PutRange writes a range of bytes to a file. Note that the length of bytes must +// match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx +func (f FileServiceClient) PutRange(path string, bytes io.Reader, fileRange FileRange) error { + return f.modifyRange(path, bytes, fileRange) +} + +// modifies a range of bytes in the specified file +func (f FileServiceClient) modifyRange(path string, bytes io.Reader, fileRange FileRange) error { + if err := f.checkForStorageEmulator(); err != nil { + return err + } + if fileRange.End < fileRange.Start { + return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if bytes != nil && fileRange.End-fileRange.Start > 4194304 { + return errors.New("range cannot exceed 4MB in size") + } + + uri := f.client.getEndpoint(fileServiceName, path, url.Values{"comp": {"range"}}) + + // default to clear + write := "clear" + cl := uint64(0) + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (fileRange.End - fileRange.Start) + 1 + } + + extraHeaders := map[string]string{ + "Content-Length": strconv.FormatUint(cl, 10), + "Range": fileRange.String(), + "x-ms-write": write, + } + + headers := 
mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + resp, err := f.client.exec(http.MethodPut, uri, headers, bytes) + if err != nil { + return err + } + + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// GetFile operation reads or downloads a file from the system, including its +// metadata and properties. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f FileServiceClient) GetFile(path string, fileRange *FileRange) (*FileStream, error) { + var extraHeaders map[string]string + if fileRange != nil { + extraHeaders = map[string]string{ + "Range": fileRange.String(), + } + } + + resp, err := f.getResourceNoClose(path, compNone, resourceFile, http.MethodGet, extraHeaders) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil { + resp.body.Close() + return nil, err + } + + props, err := getFileProps(resp.headers) + md := getFileMDFromHeaders(resp.headers) + return &FileStream{Body: resp.body, Properties: props, Metadata: md}, nil +} + +// CreateShare operation creates a new share with optional metadata under the specified account. +// If the share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string, metadata map[string]string) error { + return f.createResource(ToPathSegment(name), resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) +} + +// DirectoryExists returns true if the specified directory exists on the specified share. +func (f FileServiceClient) DirectoryExists(path string) (bool, error) { + return f.resourceExists(path, resourceDirectory) +} + +// FileExists returns true if the specified file exists. 
+func (f FileServiceClient) FileExists(path string) (bool, error) { + return f.resourceExists(path, resourceFile) +} + +// ShareExists returns true if a share with given name exists +// on the storage account, otherwise returns false. +func (f FileServiceClient) ShareExists(name string) (bool, error) { + return f.resourceExists(ToPathSegment(name), resourceShare) +} + +// returns true if the specified directory or share exists +func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, error) { + if err := f.checkForStorageEmulator(); err != nil { + return false, err + } + + uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec(http.MethodHead, uri, headers, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetDirectoryURL gets the canonical URL to the directory with the specified name +// in the specified share. This method does not create a publicly accessible URL if +// the file is private and this method does not check if the directory exists. +func (f FileServiceClient) GetDirectoryURL(path string) string { + return f.client.getEndpoint(fileServiceName, path, url.Values{}) +} + +// GetShareURL gets the canonical URL to the share with the specified name in the +// specified container. This method does not create a publicly accessible URL if +// the file is private and this method does not check if the share exists. +func (f FileServiceClient) GetShareURL(name string) string { + return f.client.getEndpoint(fileServiceName, ToPathSegment(name), url.Values{}) +} + +// CreateDirectoryIfNotExists creates a new directory on the specified share +// if it does not exist. Returns true if directory is newly created or false +// if the directory already exists. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx +func (f FileServiceClient) CreateDirectoryIfNotExists(path string) (bool, error) { + resp, err := f.createResourceNoClose(path, resourceDirectory, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if container is newly created or false if +// container already exists. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createResourceNoClose(ToPathSegment(name), resourceShare, nil) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// creates a resource depending on the specified resource type +func (f FileServiceClient) createResource(path string, res resourceType, extraHeaders map[string]string) error { + resp, err := f.createResourceNoClose(path, res, extraHeaders) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// creates a resource depending on the specified resource type, doesn't close the response body +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + uri := f.client.getEndpoint(fileServiceName, path, values) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(http.MethodPut, uri, 
headers, nil) +} + +// GetDirectoryProperties provides various information about the specified directory. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn194272.aspx +func (f FileServiceClient) GetDirectoryProperties(path string) (*DirectoryProperties, error) { + headers, err := f.getResourceHeaders(path, compNone, resourceDirectory, http.MethodHead) + if err != nil { + return nil, err + } + + return &DirectoryProperties{ + LastModified: headers.Get("Last-Modified"), + Etag: headers.Get("Etag"), + }, nil +} + +// GetFileProperties provides various information about the specified file. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166971.aspx +func (f FileServiceClient) GetFileProperties(path string) (*FileProperties, error) { + headers, err := f.getResourceHeaders(path, compNone, resourceFile, http.MethodHead) + if err != nil { + return nil, err + } + return getFileProps(headers) +} + +// returns file properties from the specified HTTP header +func getFileProps(header http.Header) (*FileProperties, error) { + size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) + if err != nil { + return nil, err + } + + return &FileProperties{ + CacheControl: header.Get("Cache-Control"), + ContentLength: size, + ContentType: header.Get("Content-Type"), + CopyCompletionTime: header.Get("x-ms-copy-completion-time"), + CopyID: header.Get("x-ms-copy-id"), + CopyProgress: header.Get("x-ms-copy-progress"), + CopySource: header.Get("x-ms-copy-source"), + CopyStatus: header.Get("x-ms-copy-status"), + CopyStatusDesc: header.Get("x-ms-copy-status-description"), + Disposition: header.Get("Content-Disposition"), + Encoding: header.Get("Content-Encoding"), + Etag: header.Get("ETag"), + Language: header.Get("Content-Language"), + LastModified: header.Get("Last-Modified"), + MD5: header.Get("Content-MD5"), + }, nil +} + +// GetShareProperties provides various information about the specified +// file. 
See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx +func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) { + headers, err := f.getResourceHeaders(ToPathSegment(name), compNone, resourceShare, http.MethodHead) + if err != nil { + return nil, err + } + return &ShareProperties{ + LastModified: headers.Get("Last-Modified"), + Etag: headers.Get("Etag"), + Quota: headers.Get("x-ms-share-quota"), + }, nil +} + +// returns HTTP header data for the specified directory or share +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, verb, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + return resp.headers, nil +} + +// gets the specified resource, doesn't close the response body +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params := getURLInitValues(comp, res) + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(verb, uri, headers, nil) +} + +// SetFileProperties operation sets system properties on the specified file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetFileProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx +func (f FileServiceClient) SetFileProperties(path string, props FileProperties) error { + return f.setResourceHeaders(path, compProperties, resourceFile, headersFromStruct(props)) +} + +// SetShareProperties replaces the ShareHeaders for the specified file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx +func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error { + return f.setResourceHeaders(ToPathSegment(name), compProperties, resourceShare, headersFromStruct(shareHeaders)) +} + +// DeleteDirectory operation removes the specified empty directory. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx +func (f FileServiceClient) DeleteDirectory(path string) error { + return f.deleteResource(path, resourceDirectory) +} + +// DeleteFile operation immediately removes the file from the storage account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx +func (f FileServiceClient) DeleteFile(path string) error { + return f.deleteResource(path, resourceFile) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + return f.deleteResource(ToPathSegment(name), resourceShare) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. 
Returns true if share existed and deleted with this call, +// false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteResourceNoClose(ToPathSegment(name), resourceShare) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deletes the resource and returns the response +func (f FileServiceClient) deleteResource(path string, res resourceType) error { + resp, err := f.deleteResourceNoClose(path, res) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// deletes the resource and returns the response, doesn't close the response body +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + uri := f.client.getEndpoint(fileServiceName, path, values) + return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil) +} + +// SetDirectoryMetadata replaces the metadata for the specified directory. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetDirectoryMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx +func (f FileServiceClient) SetDirectoryMetadata(path string, metadata map[string]string) error { + return f.setResourceHeaders(path, compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(metadata, nil)) +} + +// SetFileMetadata replaces the metadata for the specified file. 
+// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetFileMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx +func (f FileServiceClient) SetFileMetadata(path string, metadata map[string]string) error { + return f.setResourceHeaders(path, compMetadata, resourceFile, mergeMDIntoExtraHeaders(metadata, nil)) +} + +// SetShareMetadata replaces the metadata for the specified Share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string) error { + return f.setResourceHeaders(ToPathSegment(name), compMetadata, resourceShare, mergeMDIntoExtraHeaders(metadata, nil)) +} + +// merges metadata into extraHeaders and returns extraHeaders +func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { + if metadata == nil && extraHeaders == nil { + return nil + } + if extraHeaders == nil { + extraHeaders = make(map[string]string) + } + for k, v := range metadata { + extraHeaders[userDefinedMetadataHeaderPrefix+k] = v + } + return extraHeaders +} + +// merges extraHeaders into headers and returns headers +func mergeHeaders(headers, extraHeaders map[string]string) map[string]string { + for k, v := range extraHeaders { + headers[k] = v + } + return headers +} + +// sets extra header data for the specified resource +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) error { + if err := f.checkForStorageEmulator(); err != nil { + return err + } + + params 
:= getURLInitValues(comp, res) + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodPut, uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetDirectoryMetadata returns all user-defined metadata for the specified directory. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/mt427371.aspx +func (f FileServiceClient) GetDirectoryMetadata(path string) (map[string]string, error) { + return f.getMetadata(path, resourceDirectory) +} + +// GetFileMetadata returns all user-defined metadata for the specified file. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689098.aspx +func (f FileServiceClient) GetFileMetadata(path string) (map[string]string, error) { + return f.getMetadata(path, resourceFile) +} + +// GetShareMetadata returns all user-defined metadata for the specified share. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) { + return f.getMetadata(ToPathSegment(name), resourceShare) +} + +// gets metadata for the specified resource +func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet) + if err != nil { + return nil, err + } + + return getFileMDFromHeaders(headers), nil +} + +// returns a map of custom metadata values from the specified HTTP header +func getFileMDFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". 
+ k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata +} + +//checkForStorageEmulator determines if the client is setup for use with +//Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go new file mode 100644 index 000000000000..c0558e5497e7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/file_test.go @@ -0,0 +1,555 @@ +package storage + +import ( + "bytes" + "io" + "math/rand" + "strconv" + + chk "gopkg.in/check.v1" +) + +type StorageFileSuite struct{} + +var _ = chk.Suite(&StorageFileSuite{}) + +func getFileClient(c *chk.C) FileServiceClient { + return getBasicClient(c).GetFileService() +} + +func (s *StorageFileSuite) Test_pathSegments(c *chk.C) { + c.Assert(ToPathSegment("foo"), chk.Equals, "/foo") + c.Assert(ToPathSegment("foo", "bar"), chk.Equals, "/foo/bar") + c.Assert(ToPathSegment("foo", "bar", "baz"), chk.Equals, "/foo/bar/baz") +} + +func (s *StorageFileSuite) TestGetURL(c *chk.C) { + api, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + cli := api.GetFileService() + + c.Assert(cli.GetShareURL("share"), chk.Equals, "https://foo.file.core.windows.net/share") + c.Assert(cli.GetDirectoryURL("share/dir"), chk.Equals, "https://foo.file.core.windows.net/share/dir") +} + +func (s *StorageFileSuite) 
TestCreateShareDeleteShare(c *chk.C) { + cli := getFileClient(c) + name := randShare() + c.Assert(cli.CreateShare(name, nil), chk.IsNil) + c.Assert(cli.DeleteShare(name), chk.IsNil) +} + +func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + defer cli.DeleteShare(name) + + // First create + ok, err := cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageFileSuite) TestDeleteShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + + // delete non-existing share + ok, err := cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateShare(name, nil), chk.IsNil) + + // delete existing share + ok, err = cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageFileSuite) Test_checkForStorageEmulator(c *chk.C) { + f := getEmulatorClient(c).GetFileService() + err := f.checkForStorageEmulator() + c.Assert(err, chk.NotNil) +} + +func (s *StorageFileSuite) TestListShares(c *chk.C) { + cli := getFileClient(c) + c.Assert(deleteTestShares(cli), chk.IsNil) + + name := randShare() + + c.Assert(cli.CreateShare(name, nil), chk.IsNil) + defer cli.DeleteShare(name) + + resp, err := cli.ListShares(ListSharesParameters{ + MaxResults: 5, + Prefix: testSharePrefix}) + c.Assert(err, chk.IsNil) + + c.Check(len(resp.Shares), chk.Equals, 1) + c.Check(resp.Shares[0].Name, chk.Equals, name) + +} + +func (s *StorageFileSuite) TestShareExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + + ok, err := cli.ShareExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateShare(name, nil), chk.IsNil) + defer cli.DeleteShare(name) + + ok, err = 
cli.ShareExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageFileSuite) TestGetAndSetShareProperties(c *chk.C) { + name := randShare() + quota := rand.Intn(5120) + + cli := getFileClient(c) + c.Assert(cli.CreateShare(name, nil), chk.IsNil) + defer cli.DeleteShare(name) + + err := cli.SetShareProperties(name, ShareHeaders{Quota: strconv.Itoa(quota)}) + c.Assert(err, chk.IsNil) + + props, err := cli.GetShareProperties(name) + c.Assert(err, chk.IsNil) + + c.Assert(props.Quota, chk.Equals, strconv.Itoa(quota)) +} + +func (s *StorageFileSuite) TestGetAndSetShareMetadata(c *chk.C) { + cli := getFileClient(c) + share1 := randShare() + + c.Assert(cli.CreateShare(share1, nil), chk.IsNil) + defer cli.DeleteShare(share1) + + m, err := cli.GetShareMetadata(share1) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 0) + + share2 := randShare() + mCreate := map[string]string{ + "create": "data", + } + c.Assert(cli.CreateShare(share2, mCreate), chk.IsNil) + defer cli.DeleteShare(share2) + + m, err = cli.GetShareMetadata(share2) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 1) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + err = cli.SetShareMetadata(share2, mPut) + c.Assert(err, chk.IsNil) + + m, err = cli.GetShareMetadata(share2) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mPut) + + // Case munging + + mPutUpper := map[string]string{ + "Foo": "different bar", + "bar_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "foo": "different bar", + "bar_baz": "different waz qux", + } + + err = cli.SetShareMetadata(share2, mPutUpper) + c.Assert(err, chk.IsNil) + + m, err = cli.GetShareMetadata(share2) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mExpectLower) +} + +func (s *StorageFileSuite) TestListDirsAndFiles(c *chk.C) { + // create share + cli := getFileClient(c) + share 
:= randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + // list contents, should be empty + resp, err := cli.ListDirsAndFiles(share, ListDirsAndFilesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(resp.Directories, chk.IsNil) + c.Assert(resp.Files, chk.IsNil) + + // create a directory and a file + dir := "SomeDirectory" + file := "foo.file" + c.Assert(cli.CreateDirectory(ToPathSegment(share, dir), nil), chk.IsNil) + c.Assert(cli.CreateFile(ToPathSegment(share, file), 512, nil), chk.IsNil) + + // list contents + resp, err = cli.ListDirsAndFiles(share, ListDirsAndFilesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(resp.Directories), chk.Equals, 1) + c.Assert(len(resp.Files), chk.Equals, 1) + c.Assert(resp.Directories[0].Name, chk.Equals, dir) + c.Assert(resp.Files[0].Name, chk.Equals, file) +} + +func (s *StorageFileSuite) TestCreateDirectory(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + // directory shouldn't exist + dir := ToPathSegment(share, "SomeDirectory") + exists, err := cli.DirectoryExists(dir) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) + + // create directory + exists, err = cli.CreateDirectoryIfNotExists(dir) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, true) + + // try to create again, should fail + c.Assert(cli.CreateDirectory(dir, nil), chk.NotNil) + exists, err = cli.CreateDirectoryIfNotExists(dir) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) + + // get properties + var props *DirectoryProperties + props, err = cli.GetDirectoryProperties(dir) + c.Assert(props.Etag, chk.Not(chk.Equals), "") + c.Assert(props.LastModified, chk.Not(chk.Equals), "") + + // delete directory and verify + c.Assert(cli.DeleteDirectory(dir), chk.IsNil) + exists, err = cli.DirectoryExists(dir) + c.Assert(err, chk.IsNil) + c.Assert(exists, 
chk.Equals, false) +} + +func (s *StorageFileSuite) TestCreateFile(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + // create directory structure + dir1 := ToPathSegment(share, "one") + c.Assert(cli.CreateDirectory(dir1, nil), chk.IsNil) + dir2 := ToPathSegment(dir1, "two") + c.Assert(cli.CreateDirectory(dir2, nil), chk.IsNil) + + // verify file doesn't exist + file := ToPathSegment(dir2, "some.file") + exists, err := cli.FileExists(file) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) + + // create file + c.Assert(cli.CreateFile(file, 1024, nil), chk.IsNil) + exists, err = cli.FileExists(file) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, true) + + // delete file and verify + c.Assert(cli.DeleteFile(file), chk.IsNil) + exists, err = cli.FileExists(file) + c.Assert(err, chk.IsNil) + c.Assert(exists, chk.Equals, false) +} + +func (s *StorageFileSuite) TestGetFile(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + // create file + const size = uint64(1024) + file := ToPathSegment(share, "some.file") + c.Assert(cli.CreateFile(file, size, nil), chk.IsNil) + + // fill file with some data + c.Assert(cli.PutRange(file, newByteStream(size), FileRange{End: size - 1}), chk.IsNil) + + // set some metadata + md := map[string]string{ + "something": "somethingvalue", + "another": "anothervalue", + } + c.Assert(cli.SetFileMetadata(file, md), chk.IsNil) + + // retrieve full file content and verify + stream, err := cli.GetFile(file, nil) + c.Assert(err, chk.IsNil) + defer stream.Body.Close() + var b1 [size]byte + count, _ := stream.Body.Read(b1[:]) + c.Assert(count, chk.Equals, int(size)) + var c1 [size]byte + newByteStream(size).Read(c1[:]) + c.Assert(b1, chk.DeepEquals, c1) + c.Assert(stream.Properties.ContentLength, 
chk.Equals, size) + c.Assert(stream.Metadata, chk.DeepEquals, md) + + // retrieve partial file content and verify + stream, err = cli.GetFile(file, &FileRange{Start: size / 2, End: size - 1}) + c.Assert(err, chk.IsNil) + defer stream.Body.Close() + var b2 [size / 2]byte + count, _ = stream.Body.Read(b2[:]) + c.Assert(count, chk.Equals, int(size)/2) + var c2 [size / 2]byte + newByteStream(size / 2).Read(c2[:]) + c.Assert(b2, chk.DeepEquals, c2) +} + +func (s *StorageFileSuite) TestFileRanges(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + // create file + fileSize := uint64(4096) + file := ToPathSegment(share, "test.dat") + c.Assert(cli.CreateFile(file, fileSize, nil), chk.IsNil) + + // verify there are no valid ranges + ranges, err := cli.ListFileRanges(file, nil) + c.Assert(err, chk.IsNil) + c.Assert(ranges.ContentLength, chk.Equals, fileSize) + c.Assert(ranges.FileRanges, chk.IsNil) + + // fill entire range and validate + c.Assert(cli.PutRange(file, newByteStream(fileSize), FileRange{End: fileSize - 1}), chk.IsNil) + ranges, err = cli.ListFileRanges(file, nil) + c.Assert(err, chk.IsNil) + c.Assert(len(ranges.FileRanges), chk.Equals, 1) + c.Assert((ranges.FileRanges[0].End-ranges.FileRanges[0].Start)+1, chk.Equals, fileSize) + + // clear entire range and validate + c.Assert(cli.ClearRange(file, FileRange{End: fileSize - 1}), chk.IsNil) + ranges, err = cli.ListFileRanges(file, nil) + c.Assert(err, chk.IsNil) + c.Assert(ranges.FileRanges, chk.IsNil) + + // put partial ranges on 512 byte aligned boundaries + putRanges := []FileRange{ + {End: 511}, + {Start: 1024, End: 1535}, + {Start: 2048, End: 2559}, + {Start: 3072, End: 3583}, + } + + for _, r := range putRanges { + err = cli.PutRange(file, newByteStream(512), r) + c.Assert(err, chk.IsNil) + } + + // validate all ranges + ranges, err = cli.ListFileRanges(file, nil) + c.Assert(err, chk.IsNil) + 
c.Assert(ranges.FileRanges, chk.DeepEquals, putRanges) + + // validate sub-ranges + ranges, err = cli.ListFileRanges(file, &FileRange{Start: 1000, End: 3000}) + c.Assert(err, chk.IsNil) + c.Assert(ranges.FileRanges, chk.DeepEquals, putRanges[1:3]) + + // clear partial range and validate + c.Assert(cli.ClearRange(file, putRanges[0]), chk.IsNil) + c.Assert(cli.ClearRange(file, putRanges[2]), chk.IsNil) + ranges, err = cli.ListFileRanges(file, nil) + c.Assert(err, chk.IsNil) + c.Assert(len(ranges.FileRanges), chk.Equals, 2) + c.Assert(ranges.FileRanges[0], chk.DeepEquals, putRanges[1]) + c.Assert(ranges.FileRanges[1], chk.DeepEquals, putRanges[3]) +} + +func (s *StorageFileSuite) TestFileProperties(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + fileSize := uint64(512) + file := ToPathSegment(share, "test.dat") + c.Assert(cli.CreateFile(file, fileSize, nil), chk.IsNil) + + // get initial set of properties + props, err := cli.GetFileProperties(file) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, fileSize) + + // set some file properties + cc := "cachecontrol" + ct := "mytype" + enc := "noencoding" + lang := "neutral" + disp := "friendly" + props.CacheControl = cc + props.ContentType = ct + props.Disposition = disp + props.Encoding = enc + props.Language = lang + c.Assert(cli.SetFileProperties(file, *props), chk.IsNil) + + // retrieve and verify + props, err = cli.GetFileProperties(file) + c.Assert(err, chk.IsNil) + c.Assert(props.CacheControl, chk.Equals, cc) + c.Assert(props.ContentType, chk.Equals, ct) + c.Assert(props.Disposition, chk.Equals, disp) + c.Assert(props.Encoding, chk.Equals, enc) + c.Assert(props.Language, chk.Equals, lang) +} + +func (s *StorageFileSuite) TestDirectoryMetadata(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + 
defer cli.DeleteShare(share) + + dir1 := ToPathSegment(share, "testdir1") + c.Assert(cli.CreateDirectory(dir1, nil), chk.IsNil) + + // get metadata, shouldn't be any + md, err := cli.GetDirectoryMetadata(dir1) + c.Assert(err, chk.IsNil) + c.Assert(md, chk.HasLen, 0) + + mCreate := map[string]string{ + "create": "data", + } + dir2 := ToPathSegment(share, "testdir2") + c.Assert(cli.CreateDirectory(dir2, mCreate), chk.IsNil) + + // get metadata + md, err = cli.GetDirectoryMetadata(dir2) + c.Assert(err, chk.IsNil) + c.Assert(md, chk.HasLen, 1) + + // set some custom metadata + md = map[string]string{ + "something": "somethingvalue", + "another": "anothervalue", + } + c.Assert(cli.SetDirectoryMetadata(dir2, md), chk.IsNil) + + // retrieve and verify + var mdRes map[string]string + mdRes, err = cli.GetDirectoryMetadata(dir2) + c.Assert(err, chk.IsNil) + c.Assert(mdRes, chk.DeepEquals, md) +} + +func (s *StorageFileSuite) TestFileMetadata(c *chk.C) { + // create share + cli := getFileClient(c) + share := randShare() + + c.Assert(cli.CreateShare(share, nil), chk.IsNil) + defer cli.DeleteShare(share) + + fileSize := uint64(512) + file1 := ToPathSegment(share, "test1.dat") + c.Assert(cli.CreateFile(file1, fileSize, nil), chk.IsNil) + + // get metadata, shouldn't be any + md, err := cli.GetFileMetadata(file1) + c.Assert(err, chk.IsNil) + c.Assert(md, chk.HasLen, 0) + + mCreate := map[string]string{ + "create": "data", + } + file2 := ToPathSegment(share, "test2.dat") + c.Assert(cli.CreateFile(file2, fileSize, mCreate), chk.IsNil) + + // get metadata + md, err = cli.GetFileMetadata(file2) + c.Assert(err, chk.IsNil) + c.Assert(md, chk.HasLen, 1) + + // set some custom metadata + md = map[string]string{ + "something": "somethingvalue", + "another": "anothervalue", + } + c.Assert(cli.SetFileMetadata(file2, md), chk.IsNil) + + // retrieve and verify + var mdRes map[string]string + mdRes, err = cli.GetFileMetadata(file2) + c.Assert(err, chk.IsNil) + c.Assert(mdRes, chk.DeepEquals, 
md) +} + +func deleteTestShares(cli FileServiceClient) error { + for { + resp, err := cli.ListShares(ListSharesParameters{Prefix: testSharePrefix}) + if err != nil { + return err + } + if len(resp.Shares) == 0 { + break + } + for _, c := range resp.Shares { + err = cli.DeleteShare(c.Name) + if err != nil { + return err + } + } + } + return nil +} + +const testSharePrefix = "zzzzztest" + +func randShare() string { + return testSharePrefix + randString(32-len(testSharePrefix)) +} + +func newByteStream(count uint64) io.Reader { + b := make([]uint8, count) + for i := uint64(0); i < count; i++ { + b[i] = 0xff + } + return bytes.NewReader(b) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 000000000000..0cd3578442ea --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,344 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" +) + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client +} + +func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } +func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } +func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} + +// PutMessageParameters is the set of options can be specified for Put Messsage +// operation. 
A zero struct does not use any preferences for the request. +type PutMessageParameters struct { + VisibilityTimeout int + MessageTTL int +} + +func (p PutMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + if p.MessageTTL != 0 { + out.Set("messagettl", strconv.Itoa(p.MessageTTL)) + } + return out +} + +// GetMessagesParameters is the set of options can be specified for Get +// Messsages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesParameters struct { + NumOfMessages int + VisibilityTimeout int +} + +func (p GetMessagesParameters) getParameters() url.Values { + out := url.Values{} + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// PeekMessagesParameters is the set of options can be specified for Peek +// Messsage operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesParameters struct { + NumOfMessages int +} + +func (p PeekMessagesParameters) getParameters() url.Values { + out := url.Values{"peekonly": {"true"}} // Required for peek operation + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + return out +} + +// UpdateMessageParameters is the set of options can be specified for Update Messsage +// operation. A zero struct does not use any preferences for the request. 
+type UpdateMessageParameters struct { + PopReceipt string + VisibilityTimeout int +} + +func (p UpdateMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.PopReceipt != "" { + out.Set("popreceipt", p.PopReceipt) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// GetMessagesResponse represents a response returned from Get Messages +// operation. +type GetMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` +} + +// GetMessageResponse represents a QueueMessage object returned from Get +// Messages operation response. +type GetMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// PeekMessagesResponse represents a response returned from Get Messages +// operation. +type PeekMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` +} + +// PeekMessageResponse represents a QueueMessage object returned from Peek +// Messages operation response. +type PeekMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// QueueMetadataResponse represents user defined metadata and queue +// properties on a specific queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-values pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because the way Golang's http client (and http.Header in particular) +// canonicalize header names, the returned metadata names would always +// be all lower case. 
+func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// CreateQueue operation creates a queue under the given account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx +func (c QueueServiceClient) CreateQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// DeleteQueue operation permanently deletes the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx +func (c QueueServiceClient) DeleteQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueueExists returns true if a queue with given name exists. +func (c QueueServiceClient) QueueExists(name string) (bool, error) { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusOK, nil + } + + return false, err +} + +// PutMessage operation adds a new message to the back of the message queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx +func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(nn) + resp, err := c.client.exec("POST", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ClearMessages operation deletes all messages from the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx +func (c QueueServiceClient) ClearMessages(queue string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx +func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { + var r GetMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx +func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { + var r PeekMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// DeleteMessage operation deletes the specified message. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ + "popreceipt": {popReceipt}}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// UpdateMessage operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx +func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%d", nn) + resp, err := c.client.exec("PUT", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go new file mode 100644 index 000000000000..45a8901d6beb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/queue_test.go @@ -0,0 +1,142 @@ +package storage + +import ( + "time" + + chk "gopkg.in/check.v1" +) + +type StorageQueueSuite struct{} + +var _ = chk.Suite(&StorageQueueSuite{}) + +func getQueueClient(c *chk.C) QueueServiceClient { + return getBasicClient(c).GetQueueService() +} + +func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { + 
c.Assert(pathForQueue("q"), chk.Equals, "/q") +} + +func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { + c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") +} + +func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { + c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") +} + +func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + c.Assert(cli.DeleteQueue(name), chk.IsNil) +} + +func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 0) + + for ix := 0; ix < 3; ix++ { + err = cli.PutMessage(name, "foobar", PutMessageParameters{}) + c.Assert(err, chk.IsNil) + } + time.Sleep(1 * time.Second) + + qm, err = cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 3) +} + +func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + metadata := make(map[string]string) + metadata["Foo1"] = "bar1" + metadata["fooBaz"] = "bar" + err := cli.SetMetadata(name, metadata) + c.Assert(err, chk.IsNil) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1") + c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar") +} + +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { + cli := getQueueClient(c) + ok, err := cli.QueueExists("nonexistent-queue") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer 
cli.DeleteQueue(name) + + ok, err = cli.QueueExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageQueueSuite) TestPutMessage_PeekMessage_UpdateMessage_DeleteMessage(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + msg := randString(64 * 1024) // exercise max length + c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) + r, err := cli.PeekMessages(q, PeekMessagesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) + + gr, gerr := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: 1, VisibilityTimeout: 2}) + c.Assert(gerr, chk.IsNil) + + updatedMsg := "Test Message" + c.Assert(cli.UpdateMessage(q, r.QueueMessagesList[0].MessageID, updatedMsg, + UpdateMessageParameters{PopReceipt: gr.QueueMessagesList[0].PopReceipt, VisibilityTimeout: 2}), chk.IsNil) + r, err = cli.PeekMessages(q, PeekMessagesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 0) +} + +func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + n := 4 + for i := 0; i < n; i++ { + c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) + } + + r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, n) +} + +func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) + r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) + c.Assert(err, chk.IsNil) + 
c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + m := r.QueueMessagesList[0] + c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go new file mode 100644 index 000000000000..39e997503552 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go @@ -0,0 +1,129 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client +} + +// AzureTable is the typedef of the Azure Table name +type AzureTable string + +const ( + tablesURIPath = "/Tables" +) + +type createTableRequest struct { + TableName string `json:"TableName"` +} + +func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) } + +func (c *TableServiceClient) getStandardHeaders() map[string]string { + return map[string]string{ + "x-ms-version": "2015-02-21", + "x-ms-date": currentTimeRfc1123Formatted(), + "Accept": "application/json;odata=nometadata", + "Accept-Charset": "UTF-8", + "Content-Type": "application/json", + } +} + +// QueryTables returns the tables created in the +// *TableServiceClient storage account. 
+func (c *TableServiceClient) QueryTables() ([]AzureTable, error) { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + if err != nil { + return nil, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, err + } + + buf := new(bytes.Buffer) + buf.ReadFrom(resp.body) + + var respArray queryTablesResponse + if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil { + return nil, err + } + + s := make([]AzureTable, len(respArray.TableName)) + for i, elem := range respArray.TableName { + s[i] = AzureTable(elem.TableName) + } + + return s, nil +} + +// CreateTable creates the table given the specific +// name. This function fails if the name is not compliant +// with the specification or the tables already exists. +func (c *TableServiceClient) CreateTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + + headers := c.getStandardHeaders() + + req := createTableRequest{TableName: string(table)} + buf := new(bytes.Buffer) + + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + resp, err := c.client.execTable("POST", uri, headers, buf) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil { + return err + } + + return nil +} + +// DeleteTable deletes the table given the specific +// name. This function fails if the table is not present. +// Be advised: DeleteTable deletes all the entries +// that may be present. 
+func (c *TableServiceClient) DeleteTable(table AzureTable) error { + uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{}) + uri += fmt.Sprintf("('%s')", string(table)) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go new file mode 100644 index 000000000000..a26d9c6f581f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_entities.go @@ -0,0 +1,357 @@ +package storage + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "reflect" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + tag = "table" + tagIgnore = "-" + continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey" + continuationTokenRowHeader = "X-Ms-Continuation-Nextrowkey" + maxTopParameter = 1000 +) + +type queryTablesResponse struct { + TableName []struct { + TableName string `json:"TableName"` + } `json:"value"` +} + +const ( + tableOperationTypeInsert = iota + tableOperationTypeUpdate = iota + tableOperationTypeMerge = iota + tableOperationTypeInsertOrReplace = iota + tableOperationTypeInsertOrMerge = iota +) + +type tableOperation int + +// TableEntity interface specifies +// the functions needed to support +// marshaling and unmarshaling into +// Azure Tables. The struct must only contain +// simple types because Azure Tables do not +// support hierarchy. 
+type TableEntity interface { + PartitionKey() string + RowKey() string + SetPartitionKey(string) error + SetRowKey(string) error +} + +// ContinuationToken is an opaque (ie not useful to inspect) +// struct that Get... methods can return if there are more +// entries to be returned than the ones already +// returned. Just pass it to the same function to continue +// receiving the remaining entries. +type ContinuationToken struct { + NextPartitionKey string + NextRowKey string +} + +type getTableEntriesResponse struct { + Elements []map[string]interface{} `json:"value"` +} + +// QueryTableEntities queries the specified table and returns the unmarshaled +// entities of type retType. +// top parameter limits the returned entries up to top. Maximum top +// allowed by Azure API is 1000. In case there are more than top entries to be +// returned the function will return a non nil *ContinuationToken. You can call the +// same function again passing the received ContinuationToken as previousContToken +// parameter in order to get the following entries. The query parameter +// is the odata query. To retrieve all the entries pass the empty string. +// The function returns a pointer to a TableEntity slice, the *ContinuationToken +// if there are more entries to be returned and an error in case something went +// wrong. +// +// Example: +// entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "") +func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) { + if top > maxTopParameter { + return nil, nil, fmt.Errorf("top accepts at maximum %d elements. 
Requested %d instead", maxTopParameter, top) + } + + uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{}) + uri += fmt.Sprintf("?$top=%d", top) + if query != "" { + uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query)) + } + + if previousContToken != nil { + uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey) + } + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + + resp, err := c.client.execTable("GET", uri, headers, nil) + + if err != nil { + return nil, nil, err + } + + contToken := extractContinuationTokenFromHeaders(resp.headers) + + if err != nil { + return nil, contToken, err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return nil, contToken, err + } + + retEntries, err := deserializeEntity(retType, resp.body) + if err != nil { + return nil, contToken, err + } + + return retEntries, contToken, nil +} + +// InsertEntity inserts an entity in the specified table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. 
+func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, false, "POST"); err != nil { + return checkRespCode(sc, []int{http.StatusCreated}) + } + + return err +} + +func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + if specifyKeysInURL { + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + } + + headers := c.getStandardHeaders() + + var buf bytes.Buffer + + if err := injectPartitionAndRowKeys(entity, &buf); err != nil { + return 0, err + } + + headers["Content-Length"] = fmt.Sprintf("%d", buf.Len()) + + var err error + var resp *odataResponse + + resp, err = c.client.execTable(method, uri, headers, &buf) + + if err != nil { + return 0, err + } + + defer resp.body.Close() + + return resp.statusCode, nil +} + +// UpdateEntity updates the contents of an entity with the +// one passed as parameter. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// MergeEntity merges the contents of an entity with the +// one passed as parameter. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. 
+func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// DeleteEntityWithoutCheck deletes the entity matching by +// PartitionKey and RowKey. There is no check on IfMatch +// parameter so the entity is always deleted. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table. +func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error { + return c.DeleteEntity(table, entity, "*") +} + +// DeleteEntity deletes the entity matching by +// PartitionKey, RowKey and ifMatch field. +// The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or +// the ifMatch is different. +func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error { + uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{}) + uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey())) + + headers := c.getStandardHeaders() + + headers["Content-Length"] = "0" + headers["If-Match"] = ifMatch + + resp, err := c.client.execTable("DELETE", uri, headers, nil) + + if err != nil { + return err + } + defer resp.body.Close() + + if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil { + return err + } + + return nil +} + +// InsertOrReplaceEntity inserts an entity in the specified table +// or replaced the existing one. 
+func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "PUT"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +// InsertOrMergeEntity inserts an entity in the specified table +// or merges the existing one. +func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error { + var err error + + if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil { + return checkRespCode(sc, []int{http.StatusNoContent}) + } + return err +} + +func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error { + if err := json.NewEncoder(buf).Encode(entity); err != nil { + return err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return err + } + + // Inject PartitionKey and RowKey + dec[partitionKeyNode] = entity.PartitionKey() + dec[rowKeyNode] = entity.RowKey() + + // Remove tagged fields + // The tag is defined in the const section + // This is useful to avoid storing the PartitionKey and RowKey twice. 
+ numFields := reflect.ValueOf(entity).Elem().NumField() + for i := 0; i < numFields; i++ { + f := reflect.ValueOf(entity).Elem().Type().Field(i) + + if f.Tag.Get(tag) == tagIgnore { + // we must look for its JSON name in the dictionary + // as the user can rename it using a tag + jsonName := f.Name + if f.Tag.Get("json") != "" { + jsonName = f.Tag.Get("json") + } + delete(dec, jsonName) + } + } + + buf.Reset() + + if err := json.NewEncoder(buf).Encode(&dec); err != nil { + return err + } + + return nil +} + +func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) { + buf := new(bytes.Buffer) + + var ret getTableEntriesResponse + if err := json.NewDecoder(reader).Decode(&ret); err != nil { + return nil, err + } + + tEntries := make([]TableEntity, len(ret.Elements)) + + for i, entry := range ret.Elements { + + buf.Reset() + if err := json.NewEncoder(buf).Encode(entry); err != nil { + return nil, err + } + + dec := make(map[string]interface{}) + if err := json.NewDecoder(buf).Decode(&dec); err != nil { + return nil, err + } + + var pKey, rKey string + // strip pk and rk + for key, val := range dec { + switch key { + case partitionKeyNode: + pKey = val.(string) + case rowKeyNode: + rKey = val.(string) + } + } + + delete(dec, partitionKeyNode) + delete(dec, rowKeyNode) + + buf.Reset() + if err := json.NewEncoder(buf).Encode(dec); err != nil { + return nil, err + } + + // Create a empty retType instance + tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity) + // Popolate it with the values + if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil { + return nil, err + } + + // Reset PartitionKey and RowKey + tEntries[i].SetPartitionKey(pKey) + tEntries[i].SetRowKey(rKey) + } + + return tEntries, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken { + ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)} + + if ct.NextPartitionKey != "" 
&& ct.NextRowKey != "" { + return &ct + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go new file mode 100644 index 000000000000..307e14a3924d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/table_test.go @@ -0,0 +1,287 @@ +package storage + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "reflect" + + chk "gopkg.in/check.v1" +) + +type TableClient struct{} + +func getTableClient(c *chk.C) TableServiceClient { + return getBasicClient(c).GetTableService() +} + +type CustomEntity struct { + Name string `json:"name"` + Surname string `json:"surname"` + Number int + PKey string `json:"pk" table:"-"` + RKey string `json:"rk" table:"-"` +} + +type CustomEntityExtended struct { + *CustomEntity + ExtraField string +} + +func (c *CustomEntity) PartitionKey() string { + return c.PKey +} + +func (c *CustomEntity) RowKey() string { + return c.RKey +} + +func (c *CustomEntity) SetPartitionKey(s string) error { + c.PKey = s + return nil +} + +func (c *CustomEntity) SetRowKey(s string) error { + c.RKey = s + return nil +} + +func (s *StorageBlobSuite) Test_SharedKeyLite(c *chk.C) { + cli := getTableClient(c) + + // override the accountKey and accountName + // but make sure to reset when returning + oldAK := cli.client.accountKey + oldAN := cli.client.accountName + + defer func() { + cli.client.accountKey = oldAK + cli.client.accountName = oldAN + }() + + // don't worry, I've already changed mine :) + key, err := base64.StdEncoding.DecodeString("zHDHGs7C+Di9pZSDMuarxJJz3xRBzAHBYaobxpLEc7kwTptR/hPEa9j93hIfb2Tbe9IA50MViGmjQ6nUF/OVvA==") + if err != nil { + c.Fail() + } + + cli.client.accountKey = key + cli.client.accountName = "mindgotest" + + headers := map[string]string{ + "Accept-Charset": "UTF-8", + "Content-Type": 
"application/json", + "x-ms-date": "Wed, 23 Sep 2015 16:40:05 GMT", + "Content-Length": "0", + "x-ms-version": "2015-02-21", + "Accept": "application/json;odata=nometadata", + } + url := "https://mindgotest.table.core.windows.net/tquery()" + + ret, err := cli.client.createSharedKeyLite(url, headers) + if err != nil { + c.Fail() + } + + c.Assert(ret, chk.Equals, "SharedKeyLite mindgotest:+32DTgsPUgXPo/O7RYaTs0DllA6FTXMj3uK4Qst8y/E=") +} + +func (s *StorageBlobSuite) Test_CreateAndDeleteTable(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + + err = cli.DeleteTable(tn) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Luke", Surname: "Skywalker", Number: 1543, PKey: "pkey"} + + for i := 0; i < 12; i++ { + ce.SetRowKey(fmt.Sprintf("%d", i)) + + err = cli.InsertEntity(tn, ce) + c.Assert(err, chk.IsNil) + } +} + +func (s *StorageBlobSuite) Test_InsertOrReplaceEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} + + err = cli.InsertOrReplaceEntity(tn, ce) + c.Assert(err, chk.IsNil) + + cextra := &CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} + err = cli.InsertOrReplaceEntity(tn, cextra) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertOrMergeEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "5"} + + err = 
cli.InsertOrMergeEntity(tn, ce) + c.Assert(err, chk.IsNil) + + cextra := &CustomEntityExtended{&CustomEntity{PKey: "pkey", RKey: "5"}, "extra"} + err = cli.InsertOrReplaceEntity(tn, cextra) + c.Assert(err, chk.IsNil) +} + +func (s *StorageBlobSuite) Test_InsertAndGetEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + ce.SetRowKey("200") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") + c.Assert(err, chk.IsNil) + + c.Assert(len(entries), chk.Equals, 2) + + c.Assert(ce.RowKey(), chk.Equals, entries[1].RowKey()) + + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ce) +} + +func (s *StorageBlobSuite) Test_InsertAndQueryEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Darth", Surname: "Skywalker", Number: 60, PKey: "pkey", RKey: "100"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + ce.SetRowKey("200") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "RowKey eq '200'") + c.Assert(err, chk.IsNil) + + c.Assert(len(entries), chk.Equals, 1) + + c.Assert(ce.RowKey(), chk.Equals, entries[0].RowKey()) +} + +func (s *StorageBlobSuite) Test_InsertAndDeleteEntities(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + ce := &CustomEntity{Name: "Test", Surname: "Test2", Number: 0, PKey: "pkey", RKey: "r01"} + c.Assert(cli.InsertOrReplaceEntity(tn, ce), 
chk.IsNil) + + ce.Number = 1 + ce.SetRowKey("r02") + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + + entries, _, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "Number eq 1") + c.Assert(err, chk.IsNil) + + c.Assert(len(entries), chk.Equals, 1) + + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ce) + + c.Assert(cli.DeleteEntityWithoutCheck(tn, entries[0]), chk.IsNil) + + entries, _, err = cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 10, "") + c.Assert(err, chk.IsNil) + + // only 1 entry must be present + c.Assert(len(entries), chk.Equals, 1) +} + +func (s *StorageBlobSuite) Test_ContinuationToken(c *chk.C) { + cli := getTableClient(c) + + tn := AzureTable(randTable()) + + err := cli.CreateTable(tn) + c.Assert(err, chk.IsNil) + defer cli.DeleteTable(tn) + + var ce *CustomEntity + var ceList [5]*CustomEntity + + for i := 0; i < 5; i++ { + ce = &CustomEntity{Name: "Test", Surname: "Test2", Number: i, PKey: "pkey", RKey: fmt.Sprintf("r%d", i)} + ceList[i] = ce + c.Assert(cli.InsertOrReplaceEntity(tn, ce), chk.IsNil) + } + + // retrieve using top = 2. 
Should return 2 entries, 2 entries and finally + // 1 entry + entries, contToken, err := cli.QueryTableEntities(tn, nil, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 2) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[0]) + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[1]) + c.Assert(contToken, chk.NotNil) + + entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 2) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[2]) + c.Assert(entries[1].(*CustomEntity), chk.DeepEquals, ceList[3]) + c.Assert(contToken, chk.NotNil) + + entries, contToken, err = cli.QueryTableEntities(tn, contToken, reflect.TypeOf(ce), 2, "") + c.Assert(err, chk.IsNil) + c.Assert(len(entries), chk.Equals, 1) + c.Assert(entries[0].(*CustomEntity), chk.DeepEquals, ceList[4]) + c.Assert(contToken, chk.IsNil) +} + +func randTable() string { + const alphanum = "abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, 32) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 000000000000..57ca1b6d937e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,85 @@ +package storage + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "time" +) + +func (c Client) computeHmac256(message string) string { + h := hmac.New(sha256.New, c.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func 
currentTimeRfc1123Formatted() string { + return timeRfc1123Formatted(time.Now().UTC()) +} + +func timeRfc1123Formatted(t time.Time) string { + return t.Format(http.TimeFormat) +} + +func mergeParams(v1, v2 url.Values) url.Values { + out := url.Values{} + for k, v := range v1 { + out[k] = v + } + for k, v := range v2 { + vals, ok := out[k] + if ok { + vals = append(vals, v...) + out[k] = vals + } else { + out[k] = v + } + } + return out +} + +func prepareBlockListRequest(blocks []Block) string { + s := `` + for _, v := range blocks { + s += fmt.Sprintf("<%s>%s", v.Status, v.ID, v.Status) + } + s += `` + return s +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func xmlMarshal(v interface{}) (io.Reader, int, error) { + b, err := xml.Marshal(v) + if err != nil { + return nil, 0, err + } + return bytes.NewReader(b), len(b), nil +} + +func headersFromStruct(v interface{}) map[string]string { + headers := make(map[string]string) + value := reflect.ValueOf(v) + for i := 0; i < value.NumField(); i++ { + key := value.Type().Field(i).Tag.Get("header") + val := value.Field(i).String() + if key != "" && val != "" { + headers[key] = val + } + } + return headers +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go new file mode 100644 index 000000000000..a1817d162428 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/azure-sdk-for-go/storage/util_test.go @@ -0,0 +1,86 @@ +package storage + +import ( + "encoding/xml" + "io/ioutil" + "net/url" + "strings" + "time" + + chk "gopkg.in/check.v1" +) + +func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { + now := time.Now().UTC() + expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT" + 
c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout)) +} + +func (s *StorageClientSuite) Test_mergeParams(c *chk.C) { + v1 := url.Values{ + "k1": {"v1"}, + "k2": {"v2"}} + v2 := url.Values{ + "k1": {"v11"}, + "k3": {"v3"}} + out := mergeParams(v1, v2) + c.Assert(out.Get("k1"), chk.Equals, "v1") + c.Assert(out.Get("k2"), chk.Equals, "v2") + c.Assert(out.Get("k3"), chk.Equals, "v3") + c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"}) +} + +func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) { + empty := []Block{} + expected := `` + c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected) + + blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}} + expected = `foobar` + c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected) +} + +func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) { + xml := ` + + myblob + ` + var blob Blob + body := ioutil.NopCloser(strings.NewReader(xml)) + c.Assert(xmlUnmarshal(body, &blob), chk.IsNil) + c.Assert(blob.Name, chk.Equals, "myblob") +} + +func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) { + type t struct { + XMLName xml.Name `xml:"S"` + Name string `xml:"Name"` + } + + b := t{Name: "myblob"} + expected := `myblob` + r, i, err := xmlMarshal(b) + c.Assert(err, chk.IsNil) + o, err := ioutil.ReadAll(r) + c.Assert(err, chk.IsNil) + out := string(o) + c.Assert(out, chk.Equals, expected) + c.Assert(i, chk.Equals, len(expected)) +} + +func (s *StorageClientSuite) Test_headersFromStruct(c *chk.C) { + type t struct { + header1 string `header:"HEADER1"` + header2 string `header:"HEADER2"` + } + + h := t{header1: "value1", header2: "value2"} + expected := map[string]string{ + "HEADER1": "value1", + "HEADER2": "value2", + } + + out := headersFromStruct(h) + + c.Assert(out, chk.DeepEquals, expected) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/LICENSE 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 000000000000..e3d9a64d1d85 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 000000000000..e25e38210168 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,9 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. 
+ +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 000000000000..ebfce8a8d38b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor 
(i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 
= '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var ToGroundBytes = getToGroundBytes() +var Executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var Intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var CsiParams = getByteRange(0x30, 0x3F) + +var CsiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var UpperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var LowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var Alphabetics = append(UpperCase, LowerCase...) + +var Printables = getByteRange(0x20, 0x7F) + +var EscapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var EscapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) 
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) + groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 000000000000..d55cc2aec793 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type AnsiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 000000000000..9fd4bd28e23f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type 
CsiEntryState struct { + BaseState +} + +func (csiState CsiEntryState) Handle(b byte) (s State, e error) { + logger.Infof("CsiEntry::Handle %#x", b) + + nextState, err := csiState.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(Alphabetics, b): + return csiState.parser.Ground, nil + case sliceContains(CsiCollectables, b): + return csiState.parser.CsiParam, nil + case sliceContains(Executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState CsiEntryState) Transition(s State) error { + logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.BaseState.Transition(s) + + switch s { + case csiState.parser.Ground: + return csiState.parser.csiDispatch() + case csiState.parser.CsiParam: + switch { + case sliceContains(CsiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(Intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState CsiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 000000000000..27807dd35b99 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type CsiParamState struct { + BaseState +} + +func (csiState CsiParamState) Handle(b byte) (s State, e error) { + logger.Infof("CsiParam::Handle %#x", b) + + nextState, err := csiState.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(Alphabetics, b): + return csiState.parser.Ground, nil + case sliceContains(CsiCollectables, b): + 
csiState.parser.collectParam() + return csiState, nil + case sliceContains(Executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState CsiParamState) Transition(s State) error { + logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.BaseState.Transition(s) + + switch s { + case csiState.parser.Ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 000000000000..b14e0ce97736 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type EscapeIntermediateState struct { + BaseState +} + +func (escState EscapeIntermediateState) Handle(b byte) (s State, e error) { + logger.Infof("EscapeIntermediateState::Handle %#x", b) + nextState, err := escState.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(Intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(Executors, b): + return escState, escState.parser.execute() + case sliceContains(EscapeIntermediateToGroundBytes, b): + return escState.parser.Ground, nil + } + + return escState, nil +} + +func (escState EscapeIntermediateState) Transition(s State) error { + logger.Infof("EscapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.BaseState.Transition(s) + + switch s { + case escState.parser.Ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_state.go new 
file mode 100644 index 000000000000..232dd8e0d17e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type EscapeState struct { + BaseState +} + +func (escState EscapeState) Handle(b byte) (s State, e error) { + logger.Infof("EscapeState::Handle %#x", b) + nextState, err := escState.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.CsiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.OscString, nil + case sliceContains(Executors, b): + return escState, escState.parser.execute() + case sliceContains(EscapeToGroundBytes, b): + return escState.parser.Ground, nil + case sliceContains(Intermeds, b): + return escState.parser.EscapeIntermediate, nil + } + + return escState, nil +} + +func (escState EscapeState) Transition(s State) error { + logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.BaseState.Transition(s) + + switch s { + case escState.parser.Ground: + return escState.parser.escDispatch() + case escState.parser.EscapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState EscapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 000000000000..98087b38c202 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor 
Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 000000000000..d600e3e4ae65 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type GroundState struct { + BaseState +} + +func (gs GroundState) Handle(b byte) (s State, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(Printables, b): + return gs, gs.parser.print() + + case sliceContains(Executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/osc_string_state.go 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 000000000000..4490e3cdf6b4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type OscStringState struct { + BaseState +} + +func (oscState OscStringState) Handle(b byte) (s State, e error) { + logger.Infof("OscString::Handle %#x", b) + nextState, err := oscState.BaseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.Ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 000000000000..ef5e0ad19984 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,137 @@ +package ansiterm + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type AnsiParser struct { + currState State + eventHandler AnsiEventHandler + context *AnsiContext + CsiEntry State + CsiParam State + DcsEntry State + Escape State + EscapeIntermediate State + Error State + Ground State + OscString State + stateMap []State +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiParser.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: 
logrus.InfoLevel, + } + + parser := &AnsiParser{ + eventHandler: evtHandler, + context: &AnsiContext{}, + } + + parser.CsiEntry = CsiEntryState{BaseState{name: "CsiEntry", parser: parser}} + parser.CsiParam = CsiParamState{BaseState{name: "CsiParam", parser: parser}} + parser.DcsEntry = DcsEntryState{BaseState{name: "DcsEntry", parser: parser}} + parser.Escape = EscapeState{BaseState{name: "Escape", parser: parser}} + parser.EscapeIntermediate = EscapeIntermediateState{BaseState{name: "EscapeIntermediate", parser: parser}} + parser.Error = ErrorState{BaseState{name: "Error", parser: parser}} + parser.Ground = GroundState{BaseState{name: "Ground", parser: parser}} + parser.OscString = OscStringState{BaseState{name: "OscString", parser: parser}} + + parser.stateMap = []State{ + parser.CsiEntry, + parser.CsiParam, + parser.DcsEntry, + parser.Escape, + parser.EscapeIntermediate, + parser.Error, + parser.Ground, + parser.OscString, + } + + parser.currState = getState(initialState, parser.stateMap) + + logger.Infof("CreateParser: parser %p", parser) + return parser +} + +func getState(name string, states []State) State { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + logger.Warning("newState is nil") + return errors.New(fmt.Sprintf("New state of 'nil' is invalid.")) + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState State) error { + logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + 
if err := ap.currState.Exit(); err != nil { + logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 000000000000..438802097ddb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,103 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + logger.Infof("Parsed params: %v with length: %d", params, len(params)) + return params, nil +} + +func parseCmd(context AnsiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + logger.Infof("getInt: %v", i) + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + ints := []int{} + + for _, v := range params { + i, _ := strconv.Atoi(v) + // Zero 
is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + logger.Infof("getInts: %v", ints) + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 000000000000..260e6aae3c52 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,122 @@ +package ansiterm + +import ( + "fmt" +) + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + logger.Infof("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + logger.Infof("collectInter %#x", currChar) + ap.context.paramBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + logger.Infof("escDispatch 
currentChar: %#x", ap.context.currentChar) + logger.Infof("escDispatch: %v(%v)", cmd, intermeds) + + switch cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + + logger.Infof("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return 
ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &AnsiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test.go new file mode 100644 index 000000000000..6e6deb99c758 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test.go @@ -0,0 +1,141 @@ +package ansiterm + +import ( + "fmt" + "testing" +) + +func TestStateTransitions(t *testing.T) { + stateTransitionHelper(t, "CsiEntry", "Ground", Alphabetics) + stateTransitionHelper(t, "CsiEntry", "CsiParam", CsiCollectables) + stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY}) + stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D}) + stateTransitionHelper(t, "Escape", "Ground", EscapeToGroundBytes) + stateTransitionHelper(t, "Escape", "EscapeIntermediate", Intermeds) + stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", Intermeds) + stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", Executors) + stateTransitionHelper(t, "EscapeIntermediate", "Ground", EscapeIntermediateToGroundBytes) + stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL}) + stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C}) + stateTransitionHelper(t, "Ground", "Ground", Executors) +} + +func TestAnyToX(t *testing.T) { + anyToXHelper(t, 
[]byte{ANSI_ESCAPE_PRIMARY}, "Escape") + anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry") + anyToXHelper(t, []byte{OSC_STRING}, "OscString") + anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry") + anyToXHelper(t, ToGroundBytes, "Ground") +} + +func TestCollectCsiParams(t *testing.T) { + parser, _ := createTestParser("CsiEntry") + parser.Parse(CsiCollectables) + + buffer := parser.context.paramBuffer + bufferCount := len(buffer) + + if bufferCount != len(CsiCollectables) { + t.Errorf("Buffer: %v", buffer) + t.Errorf("CsiParams: %v", CsiCollectables) + t.Errorf("Buffer count failure: %d != %d", bufferCount, len(CsiParams)) + return + } + + for i, v := range CsiCollectables { + if v != buffer[i] { + t.Errorf("Buffer: %v", buffer) + t.Errorf("CsiParams: %v", CsiParams) + t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i]) + } + } +} + +func TestParseParams(t *testing.T) { + parseParamsHelper(t, []byte{}, []string{}) + parseParamsHelper(t, []byte{';'}, []string{}) + parseParamsHelper(t, []byte{';', ';'}, []string{}) + parseParamsHelper(t, []byte{'7'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', '8'}, []string{"78"}) + parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"}) +} + +func TestCursor(t *testing.T) { + cursorSingleParamHelper(t, 'A', "CUU") + cursorSingleParamHelper(t, 'B', "CUD") + 
cursorSingleParamHelper(t, 'C', "CUF") + cursorSingleParamHelper(t, 'D', "CUB") + cursorSingleParamHelper(t, 'E', "CNL") + cursorSingleParamHelper(t, 'F', "CPL") + cursorSingleParamHelper(t, 'G', "CHA") + cursorTwoParamHelper(t, 'H', "CUP") + cursorTwoParamHelper(t, 'f', "HVP") + funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"}) + funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"}) +} + +func TestErase(t *testing.T) { + // Erase in Display + eraseHelper(t, 'J', "ED") + + // Erase in Line + eraseHelper(t, 'K', "EL") +} + +func TestSelectGraphicRendition(t *testing.T) { + funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"}) + funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"}) + funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"}) + funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"}) +} + +func TestScroll(t *testing.T) { + scrollHelper(t, 'S', "SU") + scrollHelper(t, 'T', "SD") +} + +func TestPrint(t *testing.T) { + parser, evtHandler := createTestParser("Ground") + parser.Parse(Printables) + validateState(t, parser.currState, "Ground") + + for i, v := range Printables { + expectedCall := fmt.Sprintf("Print([%s])", string(v)) + actualCall := evtHandler.FunctionCalls[i] + if actualCall != expectedCall { + t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i) + } + } +} + +func TestClear(t *testing.T) { + p, _ := createTestParser("Ground") + fillContext(p.context) + p.clear() + validateEmptyContext(t, p.context) +} + +func TestClearOnStateChange(t *testing.T) { + clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY}) + clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY}) +} + +func TestC0(t *testing.T) { + expectedCall := "Execute([" + 
string(ANSI_LINE_FEED) + "])" + c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall}) + expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])" + c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall}) +} + +func TestEscDispatch(t *testing.T) { + funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"}) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go new file mode 100644 index 000000000000..562f215d342d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go @@ -0,0 +1,114 @@ +package ansiterm + +import ( + "fmt" + "testing" +) + +func getStateNames() []string { + parser, _ := createTestParser("Ground") + + stateNames := []string{} + for _, state := range parser.stateMap { + stateNames = append(stateNames, state.Name()) + } + + return stateNames +} + +func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) { + for _, b := range bytes { + bytes := []byte{byte(b)} + parser, _ := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, end) + } +} + +func anyToXHelper(t *testing.T, bytes []byte, expectedState string) { + for _, s := range getStateNames() { + stateTransitionHelper(t, s, expectedState, bytes) + } +} + +func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) { + parser, evtHandler := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, expected) + validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) +} + +func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) { + params, err := parseParams(bytes) + + if err != nil { + t.Errorf("Parameter parse error: %v", err) + return + } + + if len(params) != 
len(expectedParams) { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams)) + return + } + + for i, v := range expectedParams { + if v != params[i] { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i) + } + } +} + +func cursorSingleParamHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) +} + +func cursorTwoParamHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", 
"Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) +} + +func eraseHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)}) + funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) +} + +func scrollHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)}) + funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)}) +} + +func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) { + p, _ := createTestParser(start) + fillContext(p.context) + p.Parse(bytes) + validateState(t, p.currState, end) + validateEmptyContext(t, p.context) +} + +func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) { + parser, evtHandler := createTestParser("Ground") + parser.Parse(bytes) + validateState(t, 
parser.currState, expectedState) + validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go new file mode 100644 index 000000000000..51d1d49bf03f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go @@ -0,0 +1,66 @@ +package ansiterm + +import ( + "testing" +) + +func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) { + evtHandler := CreateTestAnsiEventHandler() + parser := CreateParser(s, evtHandler) + + return parser, evtHandler +} + +func validateState(t *testing.T, actualState State, expectedStateName string) { + actualName := "Nil" + + if actualState != nil { + actualName = actualState.Name() + } + + if actualName != expectedStateName { + t.Errorf("Invalid State: '%s' != '%s'", actualName, expectedStateName) + } +} + +func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) { + actualCount := len(actualCalls) + expectedCount := len(expectedCalls) + + if actualCount != expectedCount { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Call count error: %d != %d", actualCount, expectedCount) + return + } + + for i, v := range actualCalls { + if v != expectedCalls[i] { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i])) + } + } +} + +func fillContext(context *AnsiContext) { + context.currentChar = 'A' + context.paramBuffer = []byte{'C', 'D', 'E'} + context.interBuffer = []byte{'F', 'G', 'H'} +} + +func validateEmptyContext(t *testing.T, context *AnsiContext) { + var expectedCurrChar byte = 0x0 + if context.currentChar != 
expectedCurrChar { + t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer) + } + +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 000000000000..0cbdcb3c8345 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type StateId int + +type State interface { + Enter() error + Exit() error + Handle(byte) (State, error) + Name() string + Transition(State) error +} + +type BaseState struct { + name string + parser *AnsiParser +} + +func (base BaseState) Enter() error { + return nil +} + +func (base BaseState) Exit() error { + return nil +} + +func (base BaseState) Handle(b byte) (s State, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.CsiEntry, nil + case b == DCS_ENTRY: + return base.parser.DcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.Escape, nil + case b == OSC_STRING: + return base.parser.OscString, nil + case sliceContains(ToGroundBytes, b): + return base.parser.Ground, nil + } + + return nil, nil +} + +func (base BaseState) Name() string { + return base.name +} + +func (base BaseState) Transition(s State) error { + if s == base.parser.Ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type DcsEntryState struct { + BaseState +} + +type ErrorState struct { + BaseState +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go new file mode 100644 index 000000000000..60f9f30b98d7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go @@ -0,0 +1,173 @@ +package ansiterm + +import ( + "fmt" + "strconv" +) + +type TestAnsiEventHandler struct { + FunctionCalls []string +} + +func CreateTestAnsiEventHandler() *TestAnsiEventHandler { + evtHandler := TestAnsiEventHandler{} + evtHandler.FunctionCalls = make([]string, 0) + return &evtHandler +} + +func (h *TestAnsiEventHandler) recordCall(call string, params []string) { + s := fmt.Sprintf("%s(%v)", call, params) + h.FunctionCalls = append(h.FunctionCalls, s) +} + +func (h *TestAnsiEventHandler) Print(b byte) error { + h.recordCall("Print", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) Execute(b byte) error { + h.recordCall("Execute", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) CUU(param int) error { + h.recordCall("CUU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUD(param int) error { + h.recordCall("CUD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUF(param int) error { + h.recordCall("CUF", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUB(param int) error { + h.recordCall("CUB", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CNL(param int) error { + h.recordCall("CNL", 
[]string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CPL(param int) error { + h.recordCall("CPL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CHA(param int) error { + h.recordCall("CHA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) VPA(param int) error { + h.recordCall("VPA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("CUP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) HVP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("HVP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) DECTCEM(visible bool) error { + h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECOM(visible bool) error { + h.recordCall("DECOM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error { + h.recordCall("DECOLM", []string{strconv.FormatBool(use132)}) + return nil +} + +func (h *TestAnsiEventHandler) ED(param int) error { + h.recordCall("ED", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) EL(param int) error { + h.recordCall("EL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) IL(param int) error { + h.recordCall("IL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DL(param int) error { + h.recordCall("DL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) ICH(param int) error { + h.recordCall("ICH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DCH(param int) error { + h.recordCall("DCH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SGR(params []int) error { + strings := 
[]string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.recordCall("SGR", strings) + return nil +} + +func (h *TestAnsiEventHandler) SU(param int) error { + h.recordCall("SU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SD(param int) error { + h.recordCall("SD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DA(params []string) error { + h.recordCall("DA", params) + return nil +} + +func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error { + topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom) + h.recordCall("DECSTBM", []string{topS, bottomS}) + return nil +} + +func (h *TestAnsiEventHandler) RI() error { + h.recordCall("RI", nil) + return nil +} + +func (h *TestAnsiEventHandler) IND() error { + h.recordCall("IND", nil) + return nil +} + +func (h *TestAnsiEventHandler) Flush() error { + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 000000000000..392114493a22 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 000000000000..78fe92fe65f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,182 @@ +// +build windows + 
+package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + . "github.com/Azure/go-ansiterm" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ANSI_ESCAPE_PRIMARY && command[1] 
== ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue SHORT) SHORT { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return SHORT(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY: + return true + case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ANSI_CMD_STR_TERM || b == ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. 
+func ensureInRange(n SHORT, min SHORT, max SHORT) SHORT { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 000000000000..1f2f3853cc4e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,329 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. +// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. 
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
+ ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE WORD = 0x0001 + FOREGROUND_GREEN WORD = 0x0002 + FOREGROUND_RED WORD = 0x0004 + FOREGROUND_INTENSITY WORD = 0x0008 + FOREGROUND_MASK WORD = 0x000F + + BACKGROUND_BLUE WORD = 0x0010 + BACKGROUND_GREEN WORD = 0x0020 + BACKGROUND_RED WORD = 0x0040 + BACKGROUND_INTENSITY WORD = 0x0080 + BACKGROUND_MASK WORD = 0x00F0 + + COMMON_LVB_MASK WORD = 0xFF00 + COMMON_LVB_REVERSE_VIDEO WORD = 0x4000 + COMMON_LVB_UNDERSCORE WORD = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx for core types (e.g., SHORT) +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + SHORT int16 + BOOL int32 + WORD uint16 + WCHAR uint16 + DWORD uint32 + + CHAR_INFO struct { + UnicodeChar WCHAR + Attributes WORD + } + + CONSOLE_CURSOR_INFO struct { + Size DWORD + Visible BOOL + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes WORD + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X SHORT + Y SHORT + } + + SMALL_RECT struct { + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType WORD + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown BOOL + RepeatCount WORD + VirtualKeyCode WORD + VirtualScanCode WORD + UnicodeChar WCHAR + ControlKeyState DWORD + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows BOOL. 
+func boolToBOOL(f bool) BOOL { + if f { + return BOOL(1) + } else { + return BOOL(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. +func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. 
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute WORD) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. 
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. +func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. 
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(DWORD(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to DWORD is just to get a pointer to pass. + return uintptr(*((*DWORD)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. 
+func use(p interface{}) {} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 000000000000..94665db6fb0c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,102 @@ +// +build windows + +package winterm + +import ( + . "github.com/Azure/go-ansiterm" +) + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. +func collectAnsiIntoWindowsAttributes(windowsMode WORD, inverted bool, baseMode WORD, ansiMode SHORT) (WORD, bool) { + switch ansiMode { + + // Mode styles + case ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ANSI_SGR_DIM, ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ANSI_SGR_REVERSE: + inverted = true + + case ANSI_SGR_REVERSE_OFF: + inverted = false + + case ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case 
ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode WORD) WORD { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git 
a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 000000000000..e4b1c255a463 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + Horizontal = iota + Vertical +) + +func (h *WindowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *WindowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *WindowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(Vertical, param) +} + +func (h *WindowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(Horizontal, param) +} + +func (h *WindowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case Horizontal: + position.X += SHORT(param) + case Vertical: + position.Y += SHORT(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != 
nil { + return err + } + + return nil +} + +func (h *WindowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += SHORT(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *WindowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = SHORT(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 000000000000..f02a5b261b54 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,86 @@ +// +build windows + +package winterm + +import ( + . 
"github.com/Azure/go-ansiterm" +) + +func (h *WindowsAnsiEventHandler) clearRange(attributes WORD, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *WindowsAnsiEventHandler) clearRect(attributes WORD, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{WCHAR(FILL_CHARACTER), attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 000000000000..ed1998245c07 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *WindowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := AddInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := AddInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *WindowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *WindowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *WindowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *WindowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
+func (h *WindowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - SHORT(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *WindowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *WindowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. 
+func (h *WindowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - SHORT(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 000000000000..2f963ff132d1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. +func AddInRange(n SHORT, increment SHORT, min SHORT, max SHORT) SHORT { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 000000000000..2d492b32e422 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,725 @@ +// +build windows + +package winterm + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + + . 
"github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type WindowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes WORD + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD +} + +func CreateWinEventHandler(fd uintptr, file *os.File) AnsiEventHandler { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("winEventHandler.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + return &WindowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } +} + +type scrollRegion struct { + top SHORT + bottom SHORT +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *WindowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. 
+ if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } else { + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + logger.Info("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. + logger.Info("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *WindowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. 
+ pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + logger.Info("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *WindowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *WindowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ANSI_TAB: + logger.Info("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ANSI_BEL: + h.buffer.WriteByte(ANSI_BEL) + return nil + + case ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ANSI_BACKSPACE) + } + return nil + + case ANSI_VERTICAL_TAB, ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ANSI_LINE_FEED) + + case ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *WindowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *WindowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *WindowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *WindowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *WindowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *WindowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *WindowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CHA: [%v]", 
[]string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *WindowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + SHORT(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *WindowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + SHORT(col) - 1, window.Top + SHORT(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *WindowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *WindowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *WindowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h *WindowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := SHORT(80) + if use132 { + 
targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + logger.Info("set window failed:", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *WindowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. 
+ if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *WindowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *WindowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *WindowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *WindowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *WindowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + 
logger.Infof("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *WindowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + logger.Infof("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, SHORT(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *WindowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *WindowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *WindowsAnsiEventHandler) DA(params []string) error { + logger.Infof("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *WindowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = SHORT(top - 1) + h.sr.bottom = SHORT(bottom - 1) + + // This command also moves the cursor to the origin. 
+ h.clearWrap() + return h.CUP(1, 1) +} + +func (h *WindowsAnsiEventHandler) RI() error { + if err := h.Flush(); err != nil { + return err + } + logger.Info("RI: []") + h.clearWrap() + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + if info.CursorPosition.Y == sr.top { + return h.scrollDown(1) + } else { + return h.moveCursorVertical(-1) + } +} + +func (h *WindowsAnsiEventHandler) IND() error { + logger.Info("IND: []") + return h.executeLF() +} + +func (h *WindowsAnsiEventHandler) Flush() error { + h.curInfo = nil + if h.buffer.Len() > 0 { + logger.Infof("Flush: [%s]", h.buffer.Bytes()) + if _, err := h.buffer.WriteTo(h.file); err != nil { + return err + } + } + + if h.wrapNext && !h.drewMarginByte { + logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + charInfo := []CHAR_INFO{{UnicodeChar: WCHAR(h.marginByte), Attributes: info.Attributes}} + size := COORD{1, 1} + position := COORD{0, 0} + region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} + if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { + return err + } + h.drewMarginByte = true + } + return nil +} + +// cacheConsoleInfo ensures that the current console screen information has been queried +// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. 
+func (h *WindowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { + if h.curInfo == nil { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return COORD{}, nil, err + } + h.curInfo = info + h.curPos = info.CursorPosition + } + return h.curPos, h.curInfo, nil +} + +func (h *WindowsAnsiEventHandler) updatePos(pos COORD) { + if h.curInfo == nil { + panic("failed to call getCurrentInfo before calling updatePos") + } + h.curPos = pos +} + +// clearWrap clears the state where the cursor is in the margin +// waiting for the next character before wrapping the line. This must +// be done before most operations that act on the cursor. +func (h *WindowsAnsiEventHandler) clearWrap() { + h.wrapNext = false + h.drewMarginByte = false +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000000..b9d6a27ea92e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000000..9804f401ef9f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,114 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. 
+ +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +import ( + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. 
+func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go new file mode 100644 index 000000000000..58f6501140dc --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go @@ -0,0 +1,126 @@ +package autorest + +import ( + "net/http" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestResponseHasStatusCode(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusAccepted} + if !ResponseHasStatusCode(resp, codes...) { + t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) + } +} + +func TestResponseHasStatusCodeNotPresent(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if ResponseHasStatusCode(resp, codes...) 
{ + t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes) + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing") + } +} + +func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + _, err := NewPollingRequest(resp, nil) + if err == nil { + t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequestReturnsAGetRequest(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.Method != "GET" { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestNewPollingRequestProvidesTheURL(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.URL.String() != mocks.TestURL { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP with the expected 
URL -- received %v, expected %v", req.URL, mocks.TestURL) + } +} + +func TestGetLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + l := GetLocation(resp) + if len(l) == 0 { + t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l) + } +} + +func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + l := GetLocation(resp) + if len(l) != 0 { + t.Fatalf("autorest: GetLocation return a value without a Location header -- received %v", l) + } +} + +func TestGetRetryAfter(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != mocks.TestDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a missing Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value") + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} diff --git 
a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000000..280d32a61dde --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,307 @@ +package azure + +import ( + "bytes" + "fmt" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "io/ioutil" + "net/http" + "strings" + "time" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + methodDelete = "DELETE" + methodPatch = "PATCH" + methodPost = "POST" + methodPut = "PUT" + methodGet = "GET" + + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + if err != nil { + return resp, err + } + pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} + if !autorest.ResponseHasStatusCode(resp, pollingCodes...) 
{ + return resp, nil + } + + ps := pollingState{} + for err == nil { + err = updatePollingState(resp, &ps) + if err != nil { + break + } + if ps.hasTerminated() { + if !ps.hasSucceeded() { + err = ps + } + break + } + + r, err = newPollingRequest(resp, ps) + if err != nil { + return resp, err + } + + delay = autorest.GetRetryAfter(resp, delay) + resp, err = autorest.SendWithSender(s, r, + autorest.AfterDelay(delay)) + } + + return resp, err + }) + } +} + +func getAsyncOperation(resp *http.Response) string { + return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) +} + +func hasSucceeded(state string) bool { + return state == operationSucceeded +} + +func hasTerminated(state string) bool { + switch state { + case operationCanceled, operationFailed, operationSucceeded: + return true + default: + return false + } +} + +func hasFailed(state string) bool { + return state == operationFailed +} + +type provisioningTracker interface { + state() string + hasSucceeded() bool + hasTerminated() bool +} + +type operationResource struct { + // Note: + // The specification states services should return the "id" field. However some return it as + // "operationId". 
+ ID string `json:"id"` + OperationID string `json:"operationId"` + Name string `json:"name"` + Status string `json:"status"` + Properties map[string]interface{} `json:"properties"` + OperationError ServiceError `json:"error"` + StartTime date.Time `json:"startTime"` + EndTime date.Time `json:"endTime"` + PercentComplete float64 `json:"percentComplete"` +} + +func (or operationResource) state() string { + return or.Status +} + +func (or operationResource) hasSucceeded() bool { + return hasSucceeded(or.state()) +} + +func (or operationResource) hasTerminated() bool { + return hasTerminated(or.state()) +} + +type provisioningProperties struct { + ProvisioningState string `json:"provisioningState"` +} + +type provisioningStatus struct { + Properties provisioningProperties `json:"properties,omitempty"` + ProvisioningError ServiceError `json:"error,omitempty"` +} + +func (ps provisioningStatus) state() string { + return ps.Properties.ProvisioningState +} + +func (ps provisioningStatus) hasSucceeded() bool { + return hasSucceeded(ps.state()) +} + +func (ps provisioningStatus) hasTerminated() bool { + return hasTerminated(ps.state()) +} + +func (ps provisioningStatus) hasProvisioningError() bool { + return ps.ProvisioningError != ServiceError{} +} + +type pollingResponseFormat string + +const ( + usesOperationResponse pollingResponseFormat = "OperationResponse" + usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" + formatIsUnknown pollingResponseFormat = "" +) + +type pollingState struct { + responseFormat pollingResponseFormat + uri string + state string + code string + message string +} + +func (ps pollingState) hasSucceeded() bool { + return hasSucceeded(ps.state) +} + +func (ps pollingState) hasTerminated() bool { + return hasTerminated(ps.state) +} + +func (ps pollingState) hasFailed() bool { + return hasFailed(ps.state) +} + +func (ps pollingState) Error() string { + return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q 
Message=%q", ps.state, ps.code, ps.message) +} + +// updatePollingState maps the operation status -- retrieved from either a provisioningState +// field, the status field of an OperationResource, or inferred from the HTTP status code -- +// into a well-known states. Since the process begins from the initial request, the state +// always comes from either a the provisioningState returned or is inferred from the HTTP +// status code. Subsequent requests will read an Azure OperationResource object if the +// service initially returned the Azure-AsyncOperation header. The responseFormat field notes +// the expected response format. +func updatePollingState(resp *http.Response, ps *pollingState) error { + // Determine the response shape + // -- The first response will always be a provisioningStatus response; only the polling requests, + // depending on the header returned, may be something otherwise. + var pt provisioningTracker + if ps.responseFormat == usesOperationResponse { + pt = &operationResource{} + } else { + pt = &provisioningStatus{} + } + + // If this is the first request (that is, the polling response shape is unknown), determine how + // to poll and what to expect + if ps.responseFormat == formatIsUnknown { + req := resp.Request + if req == nil { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") + } + + // Prefer the Azure-AsyncOperation header + ps.uri = getAsyncOperation(resp) + if ps.uri != "" { + ps.responseFormat = usesOperationResponse + } else { + ps.responseFormat = usesProvisioningStatus + } + + // Else, use the Location header + if ps.uri == "" { + ps.uri = autorest.GetLocation(resp) + } + + // Lastly, requests against an existing resource, use the last request URI + if ps.uri == "" { + m := strings.ToUpper(req.Method) + if m == methodPatch || m == methodPut || m == methodGet { + ps.uri = req.URL.String() + } + } + } + + // Read and interpret the response (saving the Body in case 
no polling is necessary) + b := &bytes.Buffer{} + err := autorest.Respond(resp, + autorest.ByCopying(b), + autorest.ByUnmarshallingJSON(pt), + autorest.ByClosing()) + resp.Body = ioutil.NopCloser(b) + if err != nil { + return err + } + + // Interpret the results + // -- Terminal states apply regardless + // -- Unknown states are per-service inprogress states + // -- Otherwise, infer state from HTTP status code + if pt.hasTerminated() { + ps.state = pt.state() + } else if pt.state() != "" { + ps.state = operationInProgress + } else { + switch resp.StatusCode { + case http.StatusAccepted: + ps.state = operationInProgress + + case http.StatusNoContent, http.StatusCreated, http.StatusOK: + ps.state = operationSucceeded + + default: + ps.state = operationFailed + } + } + + if ps.state == operationInProgress && ps.uri == "" { + return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) + } + + // For failed operation, check for error code and message in + // -- Operation resource + // -- Response + // -- Otherwise, Unknown + if ps.hasFailed() { + if ps.responseFormat == usesOperationResponse { + or := pt.(*operationResource) + ps.code = or.OperationError.Code + ps.message = or.OperationError.Message + } else { + p := pt.(*provisioningStatus) + if p.hasProvisioningError() { + ps.code = p.ProvisioningError.Code + ps.message = p.ProvisioningError.Message + } else { + ps.code = "Unknown" + ps.message = "None" + } + } + } + return nil +} + +func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { + req := resp.Request + if req == nil { + return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") + } + + reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, + autorest.AsGet(), + autorest.WithBaseURL(ps.uri)) + if err != nil { + return nil, autorest.NewErrorWithError(err, 
"azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + } + + return reqPoll, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go new file mode 100644 index 000000000000..0a06b0b15c5d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go @@ -0,0 +1,1115 @@ +package azure + +import ( + "fmt" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" + "io/ioutil" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" +) + +func TestGetAsyncOperation_ReturnsAzureAsyncOperationHeader(t *testing.T) { + r := newAsynchronousResponse() + + if getAsyncOperation(r) != mocks.TestAzureAsyncURL { + t.Fatalf("azure: getAsyncOperation failed to extract the Azure-AsyncOperation header -- expected %v, received %v", mocks.TestURL, getAsyncOperation(r)) + } +} + +func TestGetAsyncOperation_ReturnsEmptyStringIfHeaderIsAbsent(t *testing.T) { + r := mocks.NewResponse() + + if len(getAsyncOperation(r)) != 0 { + t.Fatalf("azure: getAsyncOperation failed to return empty string when the Azure-AsyncOperation header is absent -- received %v", getAsyncOperation(r)) + } +} + +func TestHasSucceeded_ReturnsTrueForSuccess(t *testing.T) { + if !hasSucceeded(operationSucceeded) { + t.Fatal("azure: hasSucceeded failed to return true for success") + } +} + +func TestHasSucceeded_ReturnsFalseOtherwise(t *testing.T) { + if hasSucceeded("not a success string") { + t.Fatal("azure: hasSucceeded returned true for a non-success") + } +} + +func TestHasTerminated_ReturnsTrueForValidTerminationStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !hasTerminated(state) { + t.Fatalf("azure: hasTerminated failed to return true for the 
'%s' state", state) + } + } +} + +func TestHasTerminated_ReturnsFalseForUnknownStates(t *testing.T) { + if hasTerminated("not a known state") { + t.Fatal("azure: hasTerminated returned true for an unknown state") + } +} + +func TestOperationError_ErrorReturnsAString(t *testing.T) { + s := (ServiceError{Code: "server code", Message: "server error"}).Error() + if s == "" { + t.Fatalf("azure: operationError#Error failed to return an error") + } + if !strings.Contains(s, "server code") || !strings.Contains(s, "server error") { + t.Fatalf("azure: operationError#Error returned a malformed error -- error='%v'", s) + } +} + +func TestOperationResource_StateReturnsState(t *testing.T) { + if (operationResource{Status: "state"}).state() != "state" { + t.Fatalf("azure: operationResource#state failed to return the correct state") + } +} + +func TestOperationResource_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (operationResource{Status: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestOperationResource_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(operationResource{Status: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: operationResource#hasSucceeded failed to return true for a successful operation") + } +} + +func TestOperationResource_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(operationResource{Status: state}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestOperationResource_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (operationResource{Status: "not a known state"}).hasTerminated() { + t.Fatalf("azure: operationResource#hasTerminated returned true for a non-terminal operation") + } +} + +func 
TestProvisioningStatus_StateReturnsState(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"state"}}).state() != "state" { + t.Fatalf("azure: provisioningStatus#state failed to return the correct state") + } +} + +func TestProvisioningStatus_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a success string"}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestProvisioningStatus_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(provisioningStatus{Properties: provisioningProperties{operationSucceeded}}).hasSucceeded() { + t.Fatalf("azure: provisioningStatus#hasSucceeded failed to return true for a successful operation") + } +} + +func TestProvisioningStatus_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(provisioningStatus{Properties: provisioningProperties{state}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestProvisioningStatus_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (provisioningStatus{Properties: provisioningProperties{"not a known state"}}).hasTerminated() { + t.Fatalf("azure: provisioningStatus#hasTerminated returned true for a non-terminal operation") + } +} + +func TestPollingState_HasSucceededReturnsFalseIfNotSuccess(t *testing.T) { + if (pollingState{state: "not a success string"}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return false for a canceled operation") + } +} + +func TestPollingState_HasSucceededReturnsTrueIfSuccessful(t *testing.T) { + if !(pollingState{state: operationSucceeded}).hasSucceeded() { + t.Fatalf("azure: pollingState#hasSucceeded failed to return true for a successful operation") + } +} + +func 
TestPollingState_HasTerminatedReturnsTrueForKnownStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + if !(pollingState{state: state}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated failed to return true for the '%s' state", state) + } + } +} + +func TestPollingState_HasTerminatedReturnsFalseForUnknownStates(t *testing.T) { + if (pollingState{state: "not a known state"}).hasTerminated() { + t.Fatalf("azure: pollingState#hasTerminated returned true for a non-terminal operation") + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfOneOccurs(t *testing.T) { + resp := mocks.NewResponseWithContent(operationResourceIllegal) + err := updatePollingState(resp, &pollingState{}) + if err == nil { + t.Fatalf("azure: updatePollingState failed to return an error after a JSON parsing error") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownProvisioningStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulProvisioningState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherProvisioningStates(t *testing.T) { + s := "not a recognized state" + resp := 
mocks.NewResponseWithContent(fmt.Sprintf(pollingStateFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_ReturnsSuccessWhenProvisioningStateFieldIsAbsentForSuccessStatusCodes(t *testing.T) { + for _, sc := range []int{http.StatusOK, http.StatusCreated, http.StatusNoContent} { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = sc + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return success when the provisionState field is absent for Status Code %d", sc) + } + } +} + +func TestUpdatePollingState_ReturnsInProgressWhenProvisioningStateFieldIsAbsentForAccepted(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = http.StatusAccepted + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated when the provisionState field is absent for Status Code Accepted") + } +} + +func TestUpdatePollingState_ReturnsFailedWhenProvisioningStateFieldIsAbsentForUnknownStatusCodes(t *testing.T) { + resp := mocks.NewResponseWithContent(pollingStateEmpty) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if !ps.hasTerminated() || ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState did not return failed when the provisionState field is absent for an unknown Status Code") + } +} + +func TestUpdatePollingState_ReturnsTerminatedForKnownOperationResourceStates(t *testing.T) { + for _, state := range []string{operationSucceeded, operationCanceled, operationFailed} { + resp := 
mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, state)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasTerminated() { + t.Fatalf("azure: updatePollingState failed to return a terminating pollingState for the '%s' state", state) + } + } +} + +func TestUpdatePollingState_ReturnsSuccessForSuccessfulOperationResourceState(t *testing.T) { + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, operationSucceeded)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if !ps.hasSucceeded() { + t.Fatalf("azure: updatePollingState failed to return a successful pollingState for the '%s' state", operationSucceeded) + } +} + +func TestUpdatePollingState_ReturnsInProgressForAllOtherOperationResourceStates(t *testing.T) { + s := "not a recognized state" + resp := mocks.NewResponseWithContent(fmt.Sprintf(operationResourceFormat, s)) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + if ps.hasTerminated() { + t.Fatalf("azure: updatePollingState returned terminated for unknown state '%s'", s) + } +} + +func TestUpdatePollingState_CopiesTheResponseBody(t *testing.T) { + s := fmt.Sprintf(pollingStateFormat, operationSucceeded) + resp := mocks.NewResponseWithContent(s) + resp.StatusCode = 42 + ps := &pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, ps) + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("azure: updatePollingState failed to replace the http.Response Body -- Error='%v'", err) + } + if string(b) != s { + t.Fatalf("azure: updatePollingState failed to copy the http.Response Body -- Expected='%s' Received='%s'", s, string(b)) + } +} + +func TestUpdatePollingState_ClosesTheOriginalResponseBody(t *testing.T) { + resp := mocks.NewResponse() + b := resp.Body.(*mocks.Body) + ps := 
&pollingState{responseFormat: usesProvisioningStatus} + updatePollingState(resp, ps) + if b.IsOpen() { + t.Fatal("azure: updatePollingState failed to close the original http.Response Body") + } +} + +func TestUpdatePollingState_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + ps := pollingState{} + err := updatePollingState(resp, &ps) + if err == nil { + t.Fatal("azure: updatePollingState failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeader(t *testing.T) { + ps := pollingState{} + updatePollingState(newAsynchronousResponse(), &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to set the correct response format when using the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_SetsTheResponseFormatWhenUsingTheAzureAsyncOperationHeaderIsMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesProvisioningStatus { + t.Fatal("azure: updatePollingState failed to set the correct response format when the Azure-AsyncOperation header is absent") + } +} + +func TestUpdatePollingState_DoesNotChangeAnExistingReponseFormat(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{responseFormat: usesOperationResponse} + updatePollingState(resp, &ps) + + if ps.responseFormat != usesOperationResponse { + t.Fatal("azure: updatePollingState failed to leave an existing response format setting") + } +} + +func TestUpdatePollingState_PrefersTheAzureAsyncOperationHeader(t *testing.T) { + resp := newAsynchronousResponse() + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != 
mocks.TestAzureAsyncURL { + t.Fatal("azure: updatePollingState failed to prefer the Azure-AsyncOperation header") + } +} + +func TestUpdatePollingState_PrefersLocationWhenTheAzureAsyncOperationHeaderMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestLocationURL { + t.Fatal("azure: updatePollingState failed to prefer the Location header when the Azure-AsyncOperation header is missing") + } +} + +func TestUpdatePollingState_UsesTheObjectLocationIfAsyncHeadersAreMissing(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = methodPatch + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatal("azure: updatePollingState failed to use the Object URL when the asynchronous headers are missing") + } +} + +func TestUpdatePollingState_RecognizesLowerCaseHTTPVerbs(t *testing.T) { + for _, m := range []string{"patch", "put", "get"} { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + resp.Request.Method = m + + ps := pollingState{} + updatePollingState(resp, &ps) + + if ps.uri != mocks.TestURL { + t.Fatalf("azure: updatePollingState failed to recognize the lower-case HTTP verb '%s'", m) + } + } +} + +func TestUpdatePollingState_ReturnsAnErrorIfAsyncHeadersAreMissingForANewOrDeletedObject(t *testing.T) { + resp := newAsynchronousResponse() + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + resp.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + for _, m := range []string{methodDelete, methodPost} { + resp.Request.Method = m + err := updatePollingState(resp, &pollingState{}) + if err 
== nil { + t.Fatalf("azure: updatePollingState failed to return an error even though it could not determine the polling URL for Method '%s'", m) + } + } +} + +func TestNewPollingRequest_FailsWhenResponseLacksRequest(t *testing.T) { + resp := newAsynchronousResponse() + resp.Request = nil + + _, err := newPollingRequest(resp, pollingState{}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when the http.Response lacked the original http.Request") + } +} + +func TestNewPollingRequest_ReturnsAnErrorWhenPrepareFails(t *testing.T) { + _, err := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if err == nil { + t.Fatal("azure: newPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequest_DoesNotReturnARequestWhenPrepareFails(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestBadURL}) + if req != nil { + t.Fatal("azure: newPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequest_ReturnsAGetRequest(t *testing.T) { + req, _ := newPollingRequest(newAsynchronousResponse(), pollingState{responseFormat: usesOperationResponse, uri: mocks.TestAzureAsyncURL}) + if req.Method != "GET" { + t.Fatalf("azure: newPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestDoPollForAsynchronous_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous polled for unspecified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + 
client.AppendResponse(newAsynchronousResponse()) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 2 { + t.Fatalf("azure: DoPollForAsynchronous failed to poll for specified status code") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r1 := newAsynchronousResponse() + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(newOperationResourceResponse("Busy"), -1) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + + wg.Done() + + r, _ := autorest.SendWithSender(client, req, + DoPollForAsynchronous(10*time.Second)) + autorest.Respond(r, + autorest.ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("azure: DoPollForAsynchronous failed to cancel") + } +} + +func TestDoPollForAsynchronous_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + r1 := newAsynchronousResponse() + b1 := r1.Body.(*mocks.Body) + r2 := newOperationResourceResponse("busy") + b2 := r2.Body.(*mocks.Body) + r3 := newOperationResourceResponse(operationSucceeded) + b3 := r3.Body.(*mocks.Body) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if b1.IsOpen() || b2.IsOpen() || b3.IsOpen() { + t.Fatalf("azure: DoPollForAsynchronous did not close unreturned response bodies") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_LeavesLastResponseBodyOpen(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := 
newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendResponse(r3) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + b, err := ioutil.ReadAll(r.Body) + if len(b) <= 0 || err != nil { + t.Fatalf("azure: DoPollForAsynchronous did not leave open the body of the last response - Error='%v'", err) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfOriginalRequestReturnedAnError(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + client.SetError(fmt.Errorf("Faux Error")) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() != 1 { + t.Fatalf("azure: DoPollForAsynchronous tried to poll after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_DoesNotPollIfCreatingOperationRequestFails(t *testing.T) { + r1 := newAsynchronousResponse() + mocks.SetResponseHeader(r1, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestBadURL) + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 1 { + t.Fatalf("azure: DoPollForAsynchronous polled with an invalidly formed operation request") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingAfterAnError(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + + client := mocks.NewSender() + client.AppendResponse(r1) + 
client.AppendAndRepeatResponse(r2, 2) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 3 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an error") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAsynchronousResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error from polling") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusAccepted(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "202 Accepted" + r1.StatusCode = http.StatusAccepted + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsForStatusCreated(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Status = "201 Created" + r1.StatusCode = http.StatusCreated + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + 
client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusTerminates(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationCanceled) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilProvisioningStatusSucceeds(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationSucceeded) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + 
autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_PollsUntilOperationResourceHasSucceeded(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() < 4 { + t.Fatalf("azure: DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingWhenOperationResourceHasTerminated(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 2) + + r, _ := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop after receiving a terminated OperationResource") + } + 
+ autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForCanceledOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationCanceled) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Canceled") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnErrorForFailedOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err == nil || !strings.Contains(fmt.Sprintf("%v", err), "Failed") { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error for a canceled OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_WithNilURI(t *testing.T) { + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r1.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + r2 := newOperationResourceResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2.Header.Del(http.CanonicalHeaderKey(autorest.HeaderLocation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendResponse(r2) + + req, _ := 
http.NewRequest("POST", "https://microsoft.com/a/b/c/", mocks.NewBody("")) + r, err := autorest.SendWithSender(client, req, + DoPollForAsynchronous(time.Millisecond)) + + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return error for nil URI. got: nil; want: Azure Polling Error - Unable to obtain polling URI for POST") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsAnUnknownErrorForFailedOperations(t *testing.T) { + // Return unknown error if error not present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newProvisioningStatusResponse(operationFailed) + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("Unknown", "None") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. 
\n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForLastErrorResponse(t *testing.T) { + // Return error code and message if error present in last response + r1 := newAsynchronousResponse() + r1.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r2 := newProvisioningStatusResponse("busy") + r2.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + r3 := newAsynchronousResponseWithError() + r3.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("InvalidParameter", "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for an unknown error. 
\n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsOperationResourceErrorForFailedOperations(t *testing.T) { + // Return Operation resource response with error code and message in last operation resource response + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + expected := makeLongRunningOperationErrorString("BadArgument", "The provided database 'foo' has an invalid username.") + if err.Error() != expected { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. \n expected=%q \n got=%q", + expected, err.Error()) + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_ReturnsErrorForFirstPutRequest(t *testing.T) { + // Return 400 bad response with error code and message in first put + r1 := newAsynchronousResponseWithError() + client := mocks.NewSender() + client.AppendResponse(r1) + + res, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an appropriate error message for a failed Operations. 
\n expected=%q \n got=%q", + errorResponse, err.Error()) + } + + err = autorest.Respond(res, + WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + + reqError, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &RequestError{ + ServiceError: &ServiceError{ + Code: "InvalidParameter", + Message: "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix.", + }, + DetailedError: autorest.DetailedError{ + StatusCode: 400, + }, + } + if !reflect.DeepEqual(reqError, expected) { + t.Fatalf("azure: wrong error. expected=%q\ngot=%q", expected, reqError) + } + + defer res.Body.Close() + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != errorResponse { + t.Fatalf("azure: Response body is wrong. got=%q expected=%q", string(b), errorResponse) + } + +} + +func TestDoPollForAsynchronous_ReturnsNoErrorForSuccessfulOperations(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationSucceeded) + + client := mocks.NewSender() + client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if err != nil { + t.Fatalf("azure: DoPollForAsynchronous returned an error for a successful OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_StopsPollingIfItReceivesAnInvalidOperationResource(t *testing.T) { + r1 := newAsynchronousResponse() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse("busy") + r3.Body = mocks.NewBody(operationResourceIllegal) + r4 := newOperationResourceResponse(operationSucceeded) + + client := mocks.NewSender() + 
client.AppendResponse(r1) + client.AppendAndRepeatResponse(r2, 2) + client.AppendAndRepeatResponse(r3, 1) + client.AppendAndRepeatResponse(r4, 1) + + r, err := autorest.SendWithSender(client, mocks.NewRequest(), + DoPollForAsynchronous(time.Millisecond)) + + if client.Attempts() > 4 { + t.Fatalf("azure: DoPollForAsynchronous failed to stop polling after receiving an invalid OperationResource") + } + if err == nil { + t.Fatalf("azure: DoPollForAsynchronous failed to return an error after receving an invalid OperationResource") + } + + autorest.Respond(r, + autorest.ByClosing()) +} + +const ( + operationResourceIllegal = ` + This is not JSON and should fail...badly. + ` + pollingStateFormat = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + "provisioningState": "%s" + } + } + ` + + errorResponse = ` + { + "error" : { + "code" : "InvalidParameter", + "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." + } + } + ` + + pollingStateEmpty = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + } + } + ` + + operationResourceFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {} + } + ` + + operationResourceErrorFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + + "properties" : {}, + "error" : { + "code" : "BadArgument", + "message" : "The provided database 'foo' has an invalid username." 
+ } + } + ` +) + +func newAsynchronousResponse() *http.Response { + r := mocks.NewResponseWithStatus("201 Created", http.StatusCreated) + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, operationInProgress)) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestAzureAsyncURL) + mocks.SetResponseHeader(r, http.CanonicalHeaderKey(autorest.HeaderLocation), mocks.TestLocationURL) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + return r +} + +func newAsynchronousResponseWithError() *http.Response { + r := mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest) + mocks.SetRetryHeader(r, retryDelay) + r.Request = mocks.NewRequestForURL(mocks.TestURL) + r.Body = mocks.NewBody(errorResponse) + return r +} + +func newOperationResourceResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceFormat, status)) + return r +} + +func newOperationResourceErrorResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(operationResourceErrorFormat, status)) + return r +} + +func newProvisioningStatusResponse(status string) *http.Response { + r := newAsynchronousResponse() + r.Body = mocks.NewBody(fmt.Sprintf(pollingStateFormat, status)) + return r +} + +func makeLongRunningOperationErrorString(code string, message string) string { + return fmt.Sprintf("Long running operation terminated with status 'Failed': Code=%q Message=%q", code, message) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000000..3f4d13421aac --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,180 @@ +/* +Package azure provides 
Azure-specific implementations used with AutoRest. + +See the included examples for more detail. +*/ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Details *[]interface{} `json:"details"` +} + +func (se ServiceError) Error() string { + if se.Details != nil { + d, err := json.Marshal(*(se.Details)) + if err != nil { + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details) + } + return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d)) + } + return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. 
+func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). 
+func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) 
{ + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } else if e.ServiceError == nil { + e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"} + } + + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go new file mode 100644 index 000000000000..9e8f29535d86 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go @@ -0,0 +1,431 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + headerAuthorization = "Authorization" + longDelay = 5 * time.Second + retryDelay = 10 * time.Millisecond + testLogPrefix = "azure:" +) + +// Use a Client Inspector to set the request identifier. 
+func ExampleWithClientID() { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL("https://microsoft.com/a/b/c/")) + + c := autorest.Client{Sender: mocks.NewSender()} + c.RequestInspector = WithReturningClientID(uuid) + + autorest.SendWithSender(c, req) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderClientID, req.Header.Get(HeaderClientID)) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) + // Output: + // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 + // Inspector added the x-ms-return-client-request-id header with the value true +} + +func TestWithReturningClientIDReturnsError(t *testing.T) { + var errIn error + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + _, errOut := autorest.Prepare(&http.Request{}, + withErrorPrepareDecorator(&errIn), + WithReturningClientID(uuid)) + + if errOut == nil || errIn != errOut { + t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", + errIn, errOut) + } +} + +func TestWithClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + WithClientID(uuid)) + + if req.Header.Get(HeaderClientID) != uuid { + t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s", + HeaderClientID, uuid, req.Header.Get(HeaderClientID)) + } +} + +func TestWithReturnClientID(t *testing.T) { + b := false + req, _ := autorest.Prepare(&http.Request{}, + WithReturnClientID(b)) + + if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { + t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", + HeaderClientID, strconv.FormatBool(b), req.Header.Get(HeaderClientID)) + } +} + +func TestExtractClientID(t *testing.T) { + uuid := 
"71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderClientID, uuid) + + if ExtractClientID(resp) != uuid { + t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", + HeaderClientID, uuid, ExtractClientID(resp)) + } +} + +func TestExtractRequestID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderRequestID, uuid) + + if ExtractRequestID(resp) != uuid { + t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", + HeaderRequestID, uuid, ExtractRequestID(resp)) + } +} + +func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) { + if !IsAzureError(&RequestError{}) { + t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error") + } +} + +func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) { + if IsAzureError(fmt.Errorf("An Error")) { + t.Fatalf("azure: IsAzureError return true for an non-Azure Service error") + } +} + +func TestNewErrorWithError_UsesReponseStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message") + if e.StatusCode != http.StatusForbidden { + t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode) + } +} + +func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) { + e1 := RequestError{} + e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"} + e1.StatusCode = 200 + e1.RequestID = "A RequestID" + e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("azure: NewErrorWithError wrapped an RequestError -- expected %T, received %T", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner 
Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(RequestError); !ok { + t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) { + body := ` + + IIS Error page + + Some non-JSON error page + ` + r := mocks.NewResponseWithContent(body) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusBadRequest + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + ok, _ := err.(*RequestError) + if ok != nil { + t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err) + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != body { + t.Fatalf("response body is wrong. got=%q exptected=%q", string(b), body) + } +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now." + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\"" + if !reflect.DeepEqual(expected, azErr.Error()) { + t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now.", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + if expected := "InternalError"; azErr.ServiceError.Code != expected { + t.Fatalf("azure: wrong error code. 
expected=%q; got=%q", expected, azErr.ServiceError.Code) + } + if azErr.ServiceError.Message == "" { + t.Fatalf("azure: error message is not unmarshaled properly") + } + b, _ := json.Marshal(*azErr.ServiceError.Details) + if string(b) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` { + t.Fatalf("azure: error details is not unmarshaled properly") + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) { + j := `{ + "Status":"NotFound" + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + + if !reflect.DeepEqual(expected, azErr.ServiceError) { + t.Fatalf("azure: service error is not unmarshaled properly. 
expected=%q\ngot=%q", expected, azErr.ServiceError) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestRequestErrorString_WithError(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Conflict", + "details": [{"code": "conflict1", "message":"error message1"}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, _ := err.(*RequestError) + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]" + if expected != azErr.Error() { + t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } +} + +func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + *e = fmt.Errorf("azure: Faux Prepare Error") + return r, *e + }) + } +} + +func withAsyncResponseDecorator(n int) autorest.SendDecorator { + i := 0 + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + if i < n { + resp.StatusCode = http.StatusCreated + resp.Header = http.Header{} + resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL) + i++ + } else { + resp.StatusCode = http.StatusOK + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + } + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() 
autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/config.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/config.go new file mode 100644 index 000000000000..bea30b0d67ea --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/config.go @@ -0,0 +1,13 @@ +package azure + +import ( + "net/url" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorizeEndpoint url.URL + TokenEndpoint url.URL + DeviceCodeEndpoint url.URL +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go new file mode 100644 index 000000000000..e1d5498a80f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go @@ -0,0 +1,193 @@ +package azure + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + logPrefix = "autorest/azure/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while 
retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when 
something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + req, _ := autorest.Prepare( + &http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err) + } + + var code DeviceCode + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&code), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + req, _ := autorest.Prepare( + &http.Request{}, + autorest.AsPost(), + 
autorest.AsFormURLEncoded(), + autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()), + autorest.WithFormData(url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + }), + ) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err) + } + + var token deviceToken + err = autorest.Respond( + resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&token), + autorest.ByClosing()) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(client, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. 
Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go new file mode 100644 index 000000000000..ab8a7889315e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken_test.go @@ -0,0 +1,301 @@ +package azure + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestResource = "SomeResource" + TestClientID = "SomeClientID" + TestTenantID = "SomeTenantID" +) + +var ( + testOAuthConfig, _ = PublicCloud.OAuthConfigForTenant(TestTenantID) + TestOAuthConfig = *testOAuthConfig +) + +const MockDeviceCodeResponse = ` +{ + "device_code": "10000-40-1234567890", + "user_code": "ABCDEF", + "verification_url": "http://aka.ms/deviceauth", + "expires_in": "900", + "interval": "0" +} +` + +const MockDeviceTokenResponse = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +} +` + +func TestDeviceCodeIncludesResource(t *testing.T) { + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) + client := &autorest.Client{Sender: sender} + + code, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err != nil { + t.Fatalf("azure: unexpected error initiating device auth") + } + + if code.Resource != TestResource { + t.Fatalf("azure: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") + } +} + +func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + 
sender.SetError(fmt.Errorf("this is an error")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) + } +} + +func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("doesn't matter") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := InitiateDeviceAuth(client, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func deviceCode() *DeviceCode { + var deviceCode DeviceCode + json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) + deviceCode.Resource = TestResource + deviceCode.ClientID = TestClientID + return &deviceCode +} + +func TestDeviceTokenReturns(t *testing.T) { + sender := mocks.NewSender() + body := 
mocks.NewBody(MockDeviceTokenResponse) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("azure: got error unexpectedly") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) + } +} + +func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 500, "Internal Server Error")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 200, "OK")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if 
body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func errorDeviceTokenResponse(message string) string { + return `{ "error": "` + message + `" }` +} + +func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := CheckForUserCompletion(client, deviceCode()) + if err != ErrDeviceAuthorizationPending { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := CheckForUserCompletion(client, deviceCode()) + if err != ErrDeviceSlowDown { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +type deviceTokenSender struct { + errorString string + attempts int +} + +func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender { + return &deviceTokenSender{errorString: deviceErrorString, attempts: 0} +} + +func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) { + var resp *http.Response + if s.attempts < 1 { + s.attempts++ + resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString)) + } else { + resp = mocks.NewResponseWithContent(MockDeviceTokenResponse) + } + return resp, nil +} + +// since the above only exercise CheckForUserCompletion, we repeat the test here, +// but with the intent of showing that WaitForUserCompletion loops properly. 
+func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { + sender := newDeviceTokenSender("authorization_pending") + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +// same as above but with SlowDown now +func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { + sender := newDeviceTokenSender("slow_down") + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != ErrDeviceAccessDenied { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err != ErrDeviceCodeExpired { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, 400, "Bad 
Request")) + client := &autorest.Client{Sender: sender} + + _, err := WaitForUserCompletion(client, deviceCode()) + if err == nil { + t.Fatalf("failed to get error") + } + if err != ErrDeviceGeneric { + t.Fatalf("azure: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000000..3af1b48d270d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,162 @@ +package azure + +import ( + "fmt" + "net/url" + "strings" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.azure.com", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + } + + // USGovernmentCloud is the cloud environment for the US Government + 
USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/?api-version=1.0", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + } + + // GermanCloud is the cloud environment operated 
in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified +func EnvironmentFromName(name string) (Environment, error) { + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + return env, nil +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls +func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) { + return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID) +} + +// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint +func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + template := "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err 
:= u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go new file mode 100644 index 000000000000..73d49429507f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go @@ -0,0 +1,232 @@ +// test +package azure + +import ( + "encoding/json" + "testing" +) + +func TestOAuthConfigForTenant(t *testing.T) { + az := PublicCloud + + config, err := az.OAuthConfigForTenant("tenant-id-test") + if err != nil { + t.Fatalf("autorest/azure: Unexpected error while retrieving oauth configuration for tenant: %v.", err) + } + + expected := "https://login.microsoftonline.com/tenant-id-test/oauth2/authorize?api-version=1.0" + if config.AuthorizeEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) + } + + expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/token?api-version=1.0" + if config.TokenEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.TokenEndpoint) + } + + expected = "https://login.microsoftonline.com/tenant-id-test/oauth2/devicecode?api-version=1.0" + if config.DeviceCodeEndpoint.String() != expected { + t.Fatalf("autorest/azure: Incorrect devicecode url for Tenant from Environment. 
expected(%s). actual(%v).", expected, config.DeviceCodeEndpoint) + } +} + +func TestEnvironmentFromName(t *testing.T) { + name := "azurechinacloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "AzureChinaCloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "azuregermancloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "AzureGermanCloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "azurepubliccloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "AzurePublicCloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "azureusgovernmentcloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "AzureUSGovernmentCloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "thisisnotarealcloudenv" + if _, err := EnvironmentFromName(name); err == nil { + t.Errorf("Expected to get an error for %q", name) + } +} + +func TestDeserializeEnvironment(t *testing.T) { + env := `{ + "name": "--name--", + "ActiveDirectoryEndpoint": "--active-directory-endpoint--", + "galleryEndpoint": "--gallery-endpoint--", + "graphEndpoint": "--graph-endpoint--", + "keyVaultDNSSuffix": "--key-vault-dns-suffix--", + "keyVaultEndpoint": "--key-vault-endpoint--", + "managementPortalURL": "--management-portal-url--", + "publishSettingsURL": "--publish-settings-url--", + "resourceManagerEndpoint": 
"--resource-manager-endpoint--", + "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", + "serviceManagementEndpoint": "--service-management-endpoint--", + "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", + "storageEndpointSuffix": "--storage-endpoint-suffix--", + "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", + "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", + "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--" + }` + + testSubject := Environment{} + err := json.Unmarshal([]byte(env), &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if "--name--" != testSubject.Name { + t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name) + } + if "--management-portal-url--" != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL) + } + if "--publish-settings-url--" != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL) + } + if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint) + } + if "--resource-manager-endpoint--" != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint) + } + if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint) + } + if "--gallery-endpoint--" != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be \"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint) + } + if "--key-vault-endpoint--" != 
testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", testSubject.KeyVaultEndpoint) + } + if "--graph-endpoint--" != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint) + } + if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix) + } + if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected sql-database-dns-suffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix) + } + if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix) + } + if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix) + } + if "--asm-vm-dns-suffix--" != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be \"--asm-vm-dns-suffix--\", but got %q", testSubject.ServiceManagementVMDNSSuffix) + } + if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix) + } +} + +func TestRoundTripSerialization(t *testing.T) { + env := Environment{ + Name: "--unit-test--", + ManagementPortalURL: "--management-portal-url", + PublishSettingsURL: "--publish-settings-url--", + ServiceManagementEndpoint: "--service-management-endpoint--", + ResourceManagerEndpoint: "--resource-management-endpoint--", + ActiveDirectoryEndpoint: "--active-directory-endpoint--", + GalleryEndpoint: 
"--gallery-endpoint--", + KeyVaultEndpoint: "--key-vault--endpoint--", + GraphEndpoint: "--graph-endpoint--", + StorageEndpointSuffix: "--storage-endpoint-suffix--", + SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", + TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", + KeyVaultDNSSuffix: "--key-vault-dns-suffix--", + ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", + ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", + ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", + } + + bytes, err := json.Marshal(env) + if err != nil { + t.Fatalf("failed to marshal: %s", err) + } + + testSubject := Environment{} + err = json.Unmarshal(bytes, &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if env.Name != testSubject.Name { + t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name) + } + if env.ManagementPortalURL != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL) + } + if env.PublishSettingsURL != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL) + } + if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint) + } + if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint) + } + if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint) + } + if env.GalleryEndpoint != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be %q, 
but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint) + } + if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint) + } + if env.GraphEndpoint != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint) + } + if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix) + } + if env.SQLDatabaseDNSSuffix != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix) + } + if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix { + t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix) + } + if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix) + } + if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix) + } + if env.ServiceManagementVMDNSSuffix != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be %q, but got %q", env.ServiceManagementVMDNSSuffix, testSubject.ServiceManagementVMDNSSuffix) + } + if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go new file mode 100644 index 000000000000..d5cf62ddc7ba --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist.go @@ -0,0 +1,59 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. 
+func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go new file mode 100644 index 000000000000..bf5cb6453144 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/persist_test.go @@ -0,0 +1,157 @@ +package azure + +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "reflect" + "runtime" + "strings" + "testing" +) + +const MockTokenJSON string = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +}` + +var TestToken = Token{ + AccessToken: "accessToken", + RefreshToken: "refreshToken", + ExpiresIn: "1000", + ExpiresOn: "2000", + 
NotBefore: "3000", + Resource: "resource", + Type: "type", +} + +func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File { + f, err := ioutil.TempFile(os.TempDir(), suffix) + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer f.Close() + + _, err = f.Write([]byte(contents)) + if err != nil { + t.Fatalf("azure: unexpected error when writing temp test file: %v", err) + } + + return f +} + +func TestLoadToken(t *testing.T) { + f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON) + defer os.Remove(f.Name()) + + expectedToken := TestToken + actualToken, err := LoadToken(f.Name()) + if err != nil { + t.Fatalf("azure: unexpected error loading token from file: %v", err) + } + + if *actualToken != expectedToken { + t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken) + } + + // test that LoadToken closes the file properly + err = SaveToken(f.Name(), 0600, *actualToken) + if err != nil { + t.Fatalf("azure: could not save token after LoadToken: %v", err) + } +} + +func TestLoadTokenFailsBadPath(t *testing.T) { + _, err := LoadToken("/tmp/this_file_should_never_exist_really") + expectedSubstring := "failed to open file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func TestLoadTokenFailsBadJson(t *testing.T) { + gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1) + f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON) + defer os.Remove(f.Name()) + + _, err := LoadToken(f.Name()) + expectedSubstring := "failed to decode contents of file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func token() *Token { + var token Token + 
json.Unmarshal([]byte(MockTokenJSON), &token) + return &token +} + +func TestSaveToken(t *testing.T) { + f, err := ioutil.TempFile("", "testloadtoken") + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer os.Remove(f.Name()) + f.Close() + + mode := os.ModePerm & 0642 + err = SaveToken(f.Name(), mode, *token()) + if err != nil { + t.Fatalf("azure: unexpected error saving token to file: %v", err) + } + fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh + if err != nil { + t.Fatalf("azure: stat failed: %v", err) + } + if runtime.GOOS != "windows" { // permissions don't work on Windows + if perm := fi.Mode().Perm(); perm != mode { + t.Fatalf("azure: wrong file perm. got:%s; expected:%s file :%s", perm, mode, f.Name()) + } + } + + var actualToken Token + var expectedToken Token + + json.Unmarshal([]byte(MockTokenJSON), expectedToken) + + contents, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal("!!") + } + json.Unmarshal(contents, actualToken) + + if !reflect.DeepEqual(actualToken, expectedToken) { + t.Fatal("azure: token was not serialized correctly") + } +} + +func TestSaveTokenFailsNoPermission(t *testing.T) { + pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall" + if runtime.GOOS == "windows" { + pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken") + } + err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token()) + expectedSubstring := "failed to create directory" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) + } +} + +func TestSaveTokenFailsCantCreate(t *testing.T) { + tokenPath := "/thiswontwork" + if runtime.GOOS == "windows" { + tokenPath = path.Join(os.Getenv("windir"), "system32") + } + err := SaveToken(tokenPath, 0644, *token()) + expectedSubstring := "failed to create the temp file to write 
the token" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token.go new file mode 100644 index 000000000000..cfcd030114c6 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token.go @@ -0,0 +1,363 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + tokenBaseDate = "1970-01-01T00:00:00Z" + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" +) + +var expirationBase time.Time + +func init() { + expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. 
+type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + return expirationBase.Add(time.Duration(s) * time.Second).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the Token. +func (t *Token) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) + }) + } +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. 
This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. 
+ jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.oauthConfig.TokenEndpoint.String(), + "iss": spt.clientID, + "sub": spt.clientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + refreshWithin time.Duration + sender autorest.Sender + + refreshCallbacks []TokenRefreshCallback +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + oauthConfig: oauthConfig, + secret: secret, + clientID: id, + resource: resource, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. 
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin). +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.Token) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error") + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.refreshInternal(spt.resource) +} + +// RefreshExchange refreshes the token, but for a different resource. 
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.refreshInternal(resource) +} + +func (spt *ServicePrincipalToken) refreshInternal(resource string) error { + v := url.Values{} + v.Set("client_id", spt.clientID) + v.Set("resource", resource) + + if spt.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.RefreshToken) + } else { + v.Set("grant_type", OAuthGrantTypeClientCredentials) + err := spt.secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsPost(), + autorest.AsFormURLEncoded(), + autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()), + autorest.WithFormData(v)) + + resp, err := autorest.SendWithSender(spt.sender, req) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s", + spt.clientID) + } + + var newToken Token + err = autorest.Respond(resp, + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&newToken), + autorest.ByClosing()) + if err != nil { + return autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request", + spt.clientID) + } + + spt.Token = newToken + + err = spt.InvokeRefreshCallbacks(newToken) + if err != nil { + // its already wrapped inside InvokeRefreshCallbacks + return err + } + + return nil +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.autoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. 
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.refreshWithin = d + return +} + +// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) { + spt.sender = s +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken. +// +// By default, the token will automatically refresh if nearly expired (as determined by the +// RefreshWithin interval). Use the AutoRefresh method to enable or disable automatically refreshing +// tokens. +func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + if spt.autoRefresh { + err := spt.EnsureFresh() + if err != nil { + return r, autorest.NewErrorWithError(err, + "azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s", + r.URL) + } + } + return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r) + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go new file mode 100644 index 000000000000..3d8990fa307e --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/azure/token_test.go @@ -0,0 +1,512 @@ +package azure + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) 
+ +const ( + defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource" + defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource" +) + +func TestTokenExpires(t *testing.T) { + tt := time.Now().Add(5 * time.Second) + tk := newTokenExpiresAt(tt) + + if tk.Expires().Equal(tt) { + t.Fatalf("azure: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) + } +} + +func TestTokenIsExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) + + if !tk.IsExpired() { + t.Fatalf("azure: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", + time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenIsExpiredUninitialized(t *testing.T) { + tk := &Token{} + + if !tk.IsExpired() { + t.Fatalf("azure: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) + } +} + +func TestTokenIsNoExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) + + if tk.IsExpired() { + t.Fatalf("azure: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenWillExpireIn(t *testing.T) { + d := 5 * time.Second + tk := newTokenExpiresIn(d) + + if !tk.WillExpireIn(d) { + t.Fatal("azure: Token#WillExpireIn mismeasured expiration time") + } +} + +func TestTokenWithAuthorization(t *testing.T) { + tk := newToken() + + req, err := autorest.Prepare(&http.Request{}, tk.WithAuthorization()) + if err != nil { + t.Fatalf("azure: Token#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", tk.AccessToken) { + t.Fatal("azure: Token#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { + spt := newServicePrincipalToken() + + if 
!spt.autoRefresh { + t.Fatal("azure: ServicePrincipalToken did not default to automatic token refreshing") + } + + spt.SetAutoRefresh(false) + if spt.autoRefresh { + t.Fatal("azure: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") + } +} + +func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { + spt := newServicePrincipalToken() + + if spt.refreshWithin != defaultRefresh { + t.Fatal("azure: ServicePrincipalToken did not correctly set the default refresh interval") + } + + spt.SetRefreshWithin(2 * defaultRefresh) + if spt.refreshWithin != 2*defaultRefresh { + t.Fatal("azure: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") + } +} + +func TestServicePrincipalTokenSetSender(t *testing.T) { + spt := newServicePrincipalToken() + + var s autorest.Sender + s = mocks.NewSender() + spt.SetSender(s) + if !reflect.DeepEqual(s, spt.sender) { + t.Fatal("azure: ServicePrincipalToken#SetSender did not set the sender") + } +} + +func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody("") + resp := mocks.NewResponseWithBodyAndStatus(body, 200, "OK") + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "POST" { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return 
autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", + "application/x-form-urlencoded", + r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) + } + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", + TestOAuthConfig.TokenEndpoint, r.URL) + } + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("azure: Failed to read body of Service Principal token request (%v)", err) + } + f(t, b) + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() +} + +func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) { + sptManual := newServicePrincipalTokenManual() + testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { + if string(b) != defaultManualFormData { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly 
set the HTTP Request Body -- expected %v, received %v", + defaultManualFormData, string(b)) + } + }) +} + +func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { + sptCert := newServicePrincipalTokenCertificate(t) + testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" || + values["client_id"][0] != "id" || + values["grant_type"][0] != "client_credentials" || + values["resource"][0] != "resource" { + t.Fatalf("azure: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalToken() + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + if string(b) != defaultFormData { + t.Fatalf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultFormData, string(b)) + } + + }) +} + +func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { + spt := newServicePrincipalToken() + + resp := mocks.NewResponse() + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + spt.Refresh() + + if resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("azure: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + } +} + +func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.SetError(fmt.Errorf("Faux Error")) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatal("azure: Failed to propagate the request error") + } +} + 
+func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", 401)) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatal("azure: Failed to return an when receiving a status code other than HTTP 200") + } +} + +func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { + spt := newServicePrincipalToken() + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + j := newTokenJSON(expiresOn, "resource") + resp := mocks.NewResponseWithContent(j) + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + err := spt.Refresh() + if err != nil { + t.Fatalf("azure: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } else if spt.AccessToken != "accessToken" || + spt.ExpiresIn != "3600" || + spt.ExpiresOn != expiresOn || + spt.NotBefore != expiresOn || + spt.Resource != "resource" || + spt.Type != "Bearer" { + t.Fatalf("azure: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", + j, *spt) + } +} + +func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { + spt := newServicePrincipalToken() + expireToken(&spt.Token) + + f := false + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.EnsureFresh() + if !f { + t.Fatal("azure: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") + } +} + +func 
TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { + spt := newServicePrincipalToken() + setTokenToExpireIn(&spt.Token, 1000*time.Second) + + f := false + c := mocks.NewSender() + s := autorest.DecorateSender(c, + (func() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + spt.EnsureFresh() + if f { + t.Fatal("azure: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") + } +} + +func TestServicePrincipalTokenWithAuthorization(t *testing.T) { + spt := newServicePrincipalToken() + setTokenToExpireIn(&spt.Token, 1000*time.Second) + r := mocks.NewRequest() + s := mocks.NewSender() + spt.SetSender(s) + + req, err := autorest.Prepare(r, spt.WithAuthorization()) + if err != nil { + t.Fatalf("azure: ServicePrincipalToken#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) { + t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) { + spt := newServicePrincipalToken() + s := mocks.NewSender() + s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", 400)) + spt.SetSender(s) + + _, err := autorest.Prepare(mocks.NewRequest(), spt.WithAuthorization()) + if err == nil { + t.Fatal("azure: ServicePrincipalToken#WithAuthorization failed to return an error when refresh fails") + } +} + +func TestRefreshCallback(t *testing.T) { + callbackTriggered := false + spt := newServicePrincipalToken(func(Token) error { + callbackTriggered = true + return nil + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, 
"resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + spt.Refresh() + + if !callbackTriggered { + t.Fatalf("azure: RefreshCallback failed to trigger call callback") + } +} + +func TestRefreshCallbackErrorPropagates(t *testing.T) { + errorText := "this is an error text" + spt := newServicePrincipalToken(func(Token) error { + return fmt.Errorf(errorText) + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, "resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + err := spt.Refresh() + + if err == nil || !strings.Contains(err.Error(), errorText) { + t.Fatalf("azure: RefreshCallback failed to propagate error") + } +} + +// This demonstrates the danger of manual token without a refresh token +func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) { + spt := newServicePrincipalTokenManual() + spt.RefreshToken = "" + err := spt.Refresh() + if err == nil { + t.Fatalf("azure: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") + } +} + +func newToken() *Token { + return &Token{ + AccessToken: "ASECRETVALUE", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } +} + +func newTokenJSON(expiresOn string, resource string) string { + return fmt.Sprintf(`{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "%s", + "not_before" : "%s", + "resource" : "%s", + "token_type" : "Bearer" + }`, + expiresOn, expiresOn, resource) +} + +func newTokenExpiresIn(expireIn time.Duration) *Token { + return setTokenToExpireIn(newToken(), expireIn) +} + +func newTokenExpiresAt(expireAt time.Time) *Token { + return setTokenToExpireAt(newToken(), expireAt) +} + +func expireToken(t *Token) *Token { + return setTokenToExpireIn(t, 0) +} + +func setTokenToExpireAt(t *Token, expireAt time.Time) *Token { + 
t.ExpiresIn = "3600" + t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(expirationBase).Seconds())) + t.NotBefore = t.ExpiresOn + return t +} + +func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token { + return setTokenToExpireAt(t, time.Now().Add(expireIn)) +} + +func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken { + spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...) + return spt +} + +func newServicePrincipalTokenManual() *ServicePrincipalToken { + token := newToken() + token.RefreshToken = "refreshtoken" + spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token) + return spt +} + +func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken { + template := x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{CommonName: "test"}, + BasicConstraintsValid: true, + } + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + if err != nil { + t.Fatal(err) + } + certificate, err := x509.ParseCertificate(certificateBytes) + if err != nil { + t.Fatal(err) + } + + spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource") + return spt +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000000..b55b3d103534 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,212 @@ +package autorest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "time" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. 
+ DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 +) + +var statusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 +} + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. 
+func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. 
Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: 30 * time.Second, + UserAgent: ua, + } +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. 
+func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + r, err := Prepare(r, + c.WithInspection(), + c.WithAuthorization()) + if err != nil { + return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + resp, err := SendWithSender(c.sender(), r, + DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...)) + Respond(resp, + c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender() Sender { + if c.Sender == nil { + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j} + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. 
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client_test.go new file mode 100644 index 000000000000..fba3aa3b2d19 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/client_test.go @@ -0,0 +1,315 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "reflect" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestLoggingInspectorWithInspection(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(mocks.NewRequestWithContent("Content"), + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + if _, err := Prepare(r, + c.WithInspection()); err != nil { + t.Error(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(r, + c.WithInspection()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not restore the 
Request body") + } +} + +func TestLoggingInspectorByInspecting(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(mocks.NewResponseWithContent("Content"), + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + if err := Respond(r, + c.ByInspecting()); err != nil { + t.Fatal(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(r, + c.ByInspecting()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body") + } +} + +func TestNewClientWithUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + + if c.UserAgent != ua { + t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", + ua, c.UserAgent) + } +} + +func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.sender()) != "*http.Client" { + t.Fatal("autorest: Client#sender failed to return http.Client by default") + } +} + +func TestClientSenderReturnsSetSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + if c.sender() != s { + t.Fatal("autorest: Client#sender failed to return set Sender") + } +} + +func 
TestClientDoInvokesSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() != 1 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientDoSetsUserAgent(t *testing.T) { + ua := "UserAgent" + c := Client{UserAgent: ua} + r := mocks.NewRequest() + s := mocks.NewSender() + c.Sender = s + + c.Do(r) + + if r.UserAgent() != ua { + t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", + http.CanonicalHeaderKey(headerUserAgent), r.UserAgent()) + } +} + +func TestClientDoSetsAuthorization(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + c.Do(r) + if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { + t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s", + http.CanonicalHeaderKey(headerAuthorization), + r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) + } +} + +func TestClientDoInvokesRequestInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{RequestInspector: i.WithInspection(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the RequestInspector") + } +} + +func TestClientDoInvokesResponseInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{ResponseInspector: i.ByInspecting(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector") + } +} + +func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + _, err := c.Do(&http.Request{}) + if err == nil { + t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed") + } +} + +func TestClientDoDoesNotSendIfPrepareFails(t 
*testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() > 0 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default") + } +} + +func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the set Authorizer") + } +} + +func TestClientWithAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + req, _ := Prepare(&http.Request{}, + c.WithAuthorization()) + + if req.Header.Get(headerAuthorization) == "" { + t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") + } +} + +func TestClientWithInspection(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.RequestInspector = r.WithInspection() + + Prepare(&http.Request{}, + c.WithInspection()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector") + } +} + +func TestClientWithInspectionSetsDefault(t *testing.T) { + c := Client{} + + r1 := &http.Request{} + r2, _ := Prepare(r1, + c.WithInspection()) + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector") + } +} + +func TestClientByInspecting(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.ResponseInspector = r.ByInspecting() + + Respond(&http.Response{}, + c.ByInspecting()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector") + } +} + +func TestClientByInspectingSetsDefault(t *testing.T) 
{ + c := Client{} + + r := &http.Response{} + Respond(r, + c.ByInspecting()) + + if !reflect.DeepEqual(r, &http.Response{}) { + t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector") + } +} + +func randomString(n int) string { + const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := make([]byte, n) + for i := range s { + s[i] = chars[r.Intn(len(chars))] + } + return string(s) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000000..80ca60e9b08a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,82 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). 
+func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go new file mode 100644 index 000000000000..622f1f7efb36 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go @@ -0,0 +1,223 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + + t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") + if err != nil { + fmt.Println(err) + } + + // Date acts as time.Time when the receiver + if d.Before(t) { + fmt.Printf("Before ") + } else { + fmt.Printf("After ") + } + + // Convert Date when needing a time.Time + if t.After(d.ToTime()) { + fmt.Printf("After") + } else { + fmt.Printf("Before") + } + // Output: Before After +} + +func ExampleDate_MarshalBinary() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalBinary() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalJSON() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: 
"2001-02-03" +} + +func ExampleDate_UnmarshalJSON() { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "2001-02-03"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Date) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalText() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalText() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func TestDateString(t *testing.T) { + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: String failed (%v)", err) + } + if d.String() != "2001-02-03" { + t.Fatalf("date: String failed (%v)", d.String()) + } +} + +func TestDateBinaryRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: MarshalBinary failed (%v)", err) + } + + d2 := Date{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestDateJSONRoundTrip(t *testing.T) { + type s struct { + Date Date `json:"date"` + } + var err error + d1 := s{} + d1.Date, err = ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestDateTextRoundTrip(t 
*testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: MarshalText failed (%v)", err) + } + d2 := Date{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestDateToTime(t *testing.T) { + var d Date + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestDateUnmarshalJSONReturnsError(t *testing.T) { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "February 3, 2001"}` + + if err := json.Unmarshal([]byte(j), &d); err == nil { + t.Fatal("date: Date failed to return error for malformed JSON date") + } +} + +func TestDateUnmarshalTextReturnsError(t *testing.T) { + d := Date{} + txt := "February 3, 2001" + + if err := d.UnmarshalText([]byte(txt)); err == nil { + t.Fatal("date: Date failed to return error for malformed Text date") + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000000..c1af6296348d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,89 @@ +package date + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
+const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). 
+func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go new file mode 100644 index 000000000000..0a7dd9eb6bbb --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go @@ -0,0 +1,263 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseTime() { + d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + fmt.Println(d) + // Output: 2001-02-03 04:05:06 +0000 UTC +} + +func ExampleTime_MarshalBinary() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalBinary() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalJSON() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + j, err := 
json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03T04:05:06Z" +} + +func ExampleTime_UnmarshalJSON() { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06Z"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalText() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalText() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func TestUnmarshalTextforInvalidDate(t *testing.T) { + d := Time{} + dt := "2001-02-03T04:05:06AAA" + + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestUnmarshalJSONforInvalidDate(t *testing.T) { + d := Time{} + dt := `"2001-02-03T04:05:06AAA"` + + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestTimeString(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + if d.String() != "2001-02-03T04:05:06Z" { + t.Fatalf("date: Time#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForError(t *testing.T) { + d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: Time#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime 
failed (%v)", err) + } + d1 := Time{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: Time#MarshalBinary failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTrip(t *testing.T) { + type s struct { + Time Time `json:"datetime"` + } + + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + + d1 := s{Time: Time{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: Time#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: Time#MarshalText failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTime(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + d := Time{ti} + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestUnmarshalJSONNoOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06.789"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func 
TestUnmarshalJSONPosOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1980-01-02T00:11:35.01+01:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalJSONNegOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1492-10-12T10:15:01.789-08:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalTextNoOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextPosOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06+00:30" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextNegOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06-11:00" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000000..11995fb9f2c5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,86 @@ +package date + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
+type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). 
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go new file mode 100644 index 000000000000..28f3ce213950 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go @@ -0,0 +1,212 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleTimeRFC1123() { + d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2006-01-02 15:04:05 +0000 MST +} + +func ExampleTimeRFC1123_MarshalBinary() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + b, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(b)) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_UnmarshalBinary() { + d := TimeRFC1123{} + t := "Mon, 02 Jan 2006 15:04:05 MST" + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalJSON() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + j, 
err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "Mon, 02 Jan 2006 15:04:05 MST" +} + +func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) { + ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC) + d := TimeRFC1123{ti} + if _, err := json.Marshal(d); err == nil { + t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date") + } +} + +func ExampleTimeRFC1123_UnmarshalJSON() { + var d struct { + Time TimeRFC1123 `json:"datetime"` + } + j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalText() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func ExampleTimeRFC1123_UnmarshalText() { + d := TimeRFC1123{} + t := "Sat, 03 Feb 2001 04:05:06 UTC" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) { + dt := `"Mon, 02 Jan 2000000 15:05 MST"` + d := TimeRFC1123{} + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) { + dt := "Mon, 02 Jan 2000000 15:05 MST" + d := TimeRFC1123{} + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestTimeStringRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" { + t.Fatalf("date: 
TimeRFC1123#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) { + d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: TimeRFC1123#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTripRfc1123(t *testing.T) { + type s struct { + Time TimeRFC1123 `json:"datetime"` + } + var err error + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := s{Time: TimeRFC1123{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalText failed 
(%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTimeRFC1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + d := TimeRFC1123{ti} + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 000000000000..207b1a240a3a --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,11 @@ +package date + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 000000000000..4bcb8f27b210 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,80 @@ +package autorest + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). 
+ PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). +func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error_test.go new file mode 100644 index 000000000000..1975155ad0a7 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/error_test.go @@ -0,0 +1,188 @@ +package autorest + +import ( + "fmt" + "net/http" + "reflect" + "regexp" + "testing" +) + +func TestNewErrorWithError_AssignsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.PackageType != "packageType" { + t.Fatalf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType) + } +} + +func TestNewErrorWithError_AssignsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Method != "method" { + t.Fatalf("autorest: Error failed to set method -- expected %v, received %v", "method", 
e.Method) + } +} + +func TestNewErrorWithError_AssignsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Message != "message" { + t.Fatalf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message) + } +} + +func TestNewErrorWithError_AssignsUndefinedStatusCodeIfRespNil(t *testing.T) { + e := NewErrorWithError(nil, "packageType", "method", nil, "message") + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithError_AssignsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithError_AcceptsArgs(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message %s", "arg") + + if matched, _ := regexp.MatchString(`.*arg.*`, e.Message); !matched { + t.Fatalf("autorest: Error failed to apply message arguments -- expected %v, received %v", + `.*arg.*`, e.Message) + } +} + +func TestNewErrorWithError_AssignsError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if e.Original != err { + t.Fatalf("autorest: Error failed to set error -- expected %v, received %v", err, e.Original) + } +} + +func TestNewErrorWithResponse_ContainsStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + 
t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithResponse_nilResponse_ReportsUndefinedStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", nil, "message") + + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithResponse_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithResponse("packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithError(nil, "packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_DoesNotWrapADetailedError(t *testing.T) { + e1 := NewError("packageType1", "method1", "message1 %s", "arg1") + e2 := NewErrorWithError(e1, "packageType2", "method2", nil, "message2 %s", "arg2") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("autorest: NewErrorWithError incorrectly wrapped a DetailedError -- expected %v, received %v", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(DetailedError); !ok { + t.Fatalf("autorest: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestDetailedError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if matched, _ := 
regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#Error failed to return original error message -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*packageType.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include PackageType -- expected %v, received %v", + `.*packageType.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*method.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Method -- expected %v, received %v", + `.*method.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*message.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Message -- expected %v, received %v", + `.*message.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if matched, _ := regexp.MatchString(`.*400.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Status Code -- expected %v, received %v", + `.*400.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsOriginal(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include 
Original error -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorSkipsOriginal(t *testing.T) { + e := NewError("packageType", "method", "message") + + if matched, _ := regexp.MatchString(`.*Original.*`, e.Error()); matched { + t.Fatalf("autorest: Error#String included missing Original error -- unexpected %v, received %v", + `.*Original.*`, e.Error()) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000000..5b2c52704a29 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,433 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. 
+type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. 
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. 
+func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. +func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. 
+func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body. +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. 
+func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. 
+func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. 
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + v.Add(key, value) + } + r.URL.RawQuery = createQuery(v) + } + return r, err + }) + } +} + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. +type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go new file mode 100644 index 000000000000..1b36379aa7df --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go @@ -0,0 +1,718 @@ +package autorest + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed +// Preparer and decorates the response. 
+func ExamplePrepareDecorator() { + path := "a/b/c/" + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, fmt.Errorf("ERROR: URL is not set") + } + r.URL.Path += path + } + return r, err + }) + } + } + + r, _ := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + pd()) + + fmt.Printf("Path is %s\n", r.URL) + // Output: Path is https://microsoft.com/a/b/c/ +} + +// PrepareDecorators may also modify and then invoke the Preparer. +func ExamplePrepareDecorator_pre() { + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json") + return p.Prepare(r) + }) + } + } + + r, _ := Prepare(&http.Request{Header: http.Header{}}, + pd()) + + fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType")) + // Output: ContentType is application/json +} + +// Create a sequence of three Preparers that build up the URL path. 
+func ExampleCreatePreparer() { + p := CreatePreparer( + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c +} + +// Create and apply separate Preparers +func ExampleCreatePreparer_multiple() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p1 := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p2 := CreatePreparer(WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p1.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + r, err = p2.Prepare(r) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and chain separate Preparers +func ExampleCreatePreparer_chain() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p = DecoratePreparer(p, WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and prepare an http.Request in one call +func ExamplePrepare() { + r, err := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/"), + WithPath("a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("%s %s", r.Method, r.URL) + } + // Output: GET https://microsoft.com/a/b/c/ +} + +// Create a request for a supplied base URL and path +func ExampleWithBaseURL() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +func 
ExampleWithBaseURL_second() { + _, err := Prepare(&http.Request{}, WithBaseURL(":")) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +// Create a request with a custom HTTP header +func ExampleWithHeader() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/"), + WithHeader("x-foo", "bar")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo")) + } + // Output: Header x-foo=bar +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithFormData() { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains age=42&name=Rob+Pike +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithJSON() { + t := mocks.T{Name: "Rob Pike", Age: 42} + + r, err := Prepare(&http.Request{}, + WithJSON(&t)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains {"name":"Rob Pike","age":42} +} + +// Create a request from a path with escaped parameters +func ExampleWithEscapedPathParameters() { + params := map[string]interface{}{ + "param1": "a b c", + "param2": "d e f", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithEscapedPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a+b+c/b/d+e+f/ +} + +// Create a request from a path with parameters +func 
ExampleWithPathParameters() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create a request with query parameters +func ExampleWithQueryParameters() { + params := map[string]interface{}{ + "q1": "value1", + "q2": "value2", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("/a/b/c/"), + WithQueryParameters(params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 +} + +func TestWithPathWithInvalidPath(t *testing.T) { + p := "path%2*end" + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape error for path '%v' ", p) + } + +} + +func TestWithPathParametersWithInvalidPath(t *testing.T) { + p := "path%2*end" + m := map[string]interface{}{ + "path1": p, + } + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPathParameters("/{path1}/", m)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape for path '%v' ", p) + } + +} + +func TestCreatePreparerDoesNotModify(t *testing.T) { + r1 := &http.Request{} + p := CreatePreparer() + r2, err := p.Prepare(r1) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreatePreparer without decorators modified the request") + } +} + +func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { + p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) + r, err := p.Prepare(&http.Request{}) + if err != nil 
{ + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if r.URL.String() != "https:/1/2/3" && r.URL.Host != "microsoft.com" { + t.Fatalf("autorest: CreatePreparer failed to run decorators in order") + } +} + +func TestAsContentType(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != "application/text" { + t.Fatalf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsFormURLEncoded(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeFormPost { + t.Fatalf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsJSON(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsJSON()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeJSON { + t.Fatalf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestWithNothing(t *testing.T) { + r1 := mocks.NewRequest() + r2, err := Prepare(r1, WithNothing()) + if err != nil { + t.Fatalf("autorest: WithNothing returned an unexpected error (%v)", err) + } + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("azure: WithNothing modified the passed HTTP Request") + } +} + +func TestWithBearerAuthorization(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { + t.Fatalf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) + } +} + +func TestWithUserAgent(t *testing.T) { + ua := "User Agent Go" + 
r, err := Prepare(mocks.NewRequest(), WithUserAgent(ua)) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.UserAgent() != ua || r.Header.Get(headerUserAgent) != ua { + t.Fatalf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) + } +} + +func TestWithMethod(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) + if r.Method != "HEAD" { + t.Fatal("autorest: WithMethod failed to set HTTP method header") + } +} + +func TestAsDelete(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsDelete()) + if r.Method != "DELETE" { + t.Fatal("autorest: AsDelete failed to set HTTP method header to DELETE") + } +} + +func TestAsGet(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsGet()) + if r.Method != "GET" { + t.Fatal("autorest: AsGet failed to set HTTP method header to GET") + } +} + +func TestAsHead(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsHead()) + if r.Method != "HEAD" { + t.Fatal("autorest: AsHead failed to set HTTP method header to HEAD") + } +} + +func TestAsOptions(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsOptions()) + if r.Method != "OPTIONS" { + t.Fatal("autorest: AsOptions failed to set HTTP method header to OPTIONS") + } +} + +func TestAsPatch(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPatch()) + if r.Method != "PATCH" { + t.Fatal("autorest: AsPatch failed to set HTTP method header to PATCH") + } +} + +func TestAsPost(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPost()) + if r.Method != "POST" { + t.Fatal("autorest: AsPost failed to set HTTP method header to POST") + } +} + +func TestAsPut(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPut()) + if r.Method != "PUT" { + t.Fatal("autorest: AsPut failed to set HTTP method header to PUT") + } +} + +func TestPrepareWithNullRequest(t *testing.T) { + _, err := Prepare(nil) + if err == nil { + t.Fatal("autorest: Prepare failed to return an error when given a null 
http.Request") + } +} + +func TestWithFormDataSetsContentLength(t *testing.T) { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + expected := "name=Rob+Pike&age=42" + if !(string(b) == "name=Rob+Pike&age=42" || string(b) == "age=42&name=Rob+Pike") { + t.Fatalf("autorest:WithFormData failed to return correct string got (%v), expected (%v)", string(b), expected) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithMultiPartFormDataSetsContentLength(t *testing.T) { + v := map[string]interface{}{ + "file": ioutil.NopCloser(strings.NewReader("Hello Gopher")), + "age": "42", + } + + r, err := Prepare(&http.Request{}, + WithMultiPartFormData(v)) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithMultiPartFormDataWithNoFile(t *testing.T) { + v := map[string]interface{}{ + "file": "no file", + "age": "42", + } + + r, err := Prepare(&http.Request{}, + WithMultiPartFormData(v)) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", 
r.ContentLength, len(b)) + } +} + +func TestWithFile(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFile(ioutil.NopCloser(strings.NewReader("Hello Gopher")))) + if err != nil { + t.Fatalf("autorest: WithFile failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFile failed with error (%v)", err) + } + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithFile set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithBool_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithBool(false)) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", false))) { + t.Fatalf("autorest: WithBool set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", false)))) + } + + v, err := strconv.ParseBool(string(s)) + if err != nil || v { + t.Fatalf("autorest: WithBool incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFloat32(42.0)) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 32) + if err != nil || float32(v) != float32(42.0) { + t.Fatalf("autorest: WithFloat32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat64_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFloat64(42.0)) + if err != nil { + 
t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 64) + if err != nil || v != float64(42.0) { + t.Fatalf("autorest: WithFloat64 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt32(42)) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 32) + if err != nil || int32(v) != int32(42) { + t.Fatalf("autorest: WithInt32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt64_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt64(42)) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 64) + if err != nil || v != int64(42) { + t.Fatalf("autorest: WithInt64 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithString_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + 
WithString("value")) + if err != nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + if r.ContentLength != int64(len("value")) { + t.Fatalf("autorest: WithString set Content-Length to %v, expected %v", r.ContentLength, int64(len("value"))) + } + + if string(s) != "value" { + t.Fatalf("autorest: WithString incorrectly encoded the string as %v", s) + } +} + +func TestWithJSONSetsContentLength(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithJSON(&mocks.T{Name: "Rob Pike", Age: 42})) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithHeaderAllocatesHeaders(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar")) + if err != nil { + t.Fatalf("autorest: WithHeader failed (%v)", err) + } + if r.Header.Get("x-foo") != "bar" { + t.Fatalf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo")) + } +} + +func TestWithPathCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPath("a")) + if err == nil { + t.Fatalf("autorest: WithPath failed to catch a nil URL") + } +} + +func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithEscapedPathParameters("", map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithEscapedPathParameters failed to catch a nil URL") + } +} + +func TestWithPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPathParameters("", map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: 
WithPathParameters failed to catch a nil URL") + } +} + +func TestWithQueryParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithQueryParameters failed to catch a nil URL") + } +} + +func TestModifyingExistingRequest(t *testing.T) { + r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"})) + if err != nil { + t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err) + } + if r.URL.String() != "https:/search?q=golang" && r.URL.Host != "bing.com" { + t.Fatalf("autorest: Preparing an existing request failed (%s)", r.URL) + } +} + +func TestWithAuthorizer(t *testing.T) { + r1 := mocks.NewRequest() + + na := &NullAuthorizer{} + r2, err := Prepare(r1, + na.WithAuthorization()) + if err != nil { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 000000000000..07cd7ef5cc8c --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,215 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. 
+type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. 
+func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. 
+func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. 
On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. 
+func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder_test.go new file mode 100644 index 000000000000..5355cc63fe28 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/responder_test.go @@ -0,0 +1,591 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleWithErrorUnlessOK() { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + // Respond and leave the response body open (for a subsequent responder to close) + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err == nil { + fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL) + + // Complete handling the response and close the body + Respond(r, + ByClosing()) + } + // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 +} + +func ExampleByUnmarshallingJSON() { + c := ` + { + "name" : "Rob Pike", + "age" : 42 + } + ` + + type V struct { + Name string `json:"name"` + Age int `json:"age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingJSON(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func ExampleByUnmarshallingXML() { + c := ` + + Rob Pike + 42 + ` + + type V struct { + Name string `xml:"Name"` + Age int `xml:"Age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingXML(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func TestCreateResponderDoesNotModify(t *testing.T) { + 
r1 := mocks.NewResponse() + r2 := mocks.NewResponse() + p := CreateResponder() + err := p.Respond(r1) + if err != nil { + t.Fatalf("autorest: CreateResponder failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreateResponder without decorators modified the response") + } +} + +func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) { + s := "" + + d := func(n int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + s += fmt.Sprintf("%d", n) + } + return err + }) + } + } + + p := CreateResponder(d(1), d(2), d(3)) + err := p.Respond(&http.Response{}) + if err != nil { + t.Fatalf("autorest: Respond failed (%v)", err) + } + + if s != "123" { + t.Fatalf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s) + } +} + +func TestByIgnoring(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(r2 *http.Response) error { + r1 := mocks.NewResponse() + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1) + } + return nil + }) + } + })(), + ByIgnoring(), + ByClosing()) +} + +func TestByCopying_Copies(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + err := Respond(r, + ByCopying(b), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByCopying returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + t.Fatalf("autorest: ByCopying failed to copy the bytes read") + } +} + +func TestByCopying_ReturnsNestedErrors(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + + r.Body.Close() + err := Respond(r, + ByCopying(&bytes.Buffer{}), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err == nil { + t.Fatalf("autorest: 
ByCopying failed to return the expected error") + } +} + +func TestByCopying_AcceptsNilReponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByCopying_AcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByClosing(t *testing.T) { + r := mocks.NewResponse() + err := Respond(r, ByClosing()) + if err != nil { + t.Fatalf("autorest: ByClosing failed (%v)", err) + } + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body") + } +} + +func TestByClosingAcceptsNilResponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingAcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingClosesEvenAfterErrors(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body after an error occurred") + } +} + +func TestByClosingClosesReturnsNestedErrors(t *testing.T) { + var e 
error + + r := mocks.NewResponse() + err := Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if err == nil || !reflect.DeepEqual(e, err) { + t.Fatalf("autorest: ByClosing failed to return a nested error") + } +} + +func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosingIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError did not close the response body after an error occurred") + } +} + +func TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { + r := mocks.NewResponse() + Respond(r, + ByClosingIfError()) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError closed the response body even though no error occurred") + } +} + +func TestByUnmarshallingJSON(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingJSON failed to properly unmarshal") + } +} + +func 
TestByUnmarshallingJSON_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to receive / respond to read error") + } +} + +func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) { + v := &mocks.T{} + j := jsonT[0 : len(jsonT)-2] + r := mocks.NewResponseWithContent(j) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), j) { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) + } +} + +func TestByUnmarshallingJSONEmptyInput(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(``) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return nil in case of empty JSON (%v)", err) + } +} + +func TestByUnmarshallingXML(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingXML failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingXML failed to properly unmarshal") + } +} + +func TestByUnmarshallingXML_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingXML failed to receive / respond to read error") + } +} + +func TestByUnmarshallingXMLIncludesXMLInErrors(t *testing.T) { + v := &mocks.T{} + x := xmlT[0 : len(xmlT)-2] + r := mocks.NewResponseWithContent(x) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), x) { + t.Fatalf("autorest: ByUnmarshallingXML 
failed to return XML in error (%v)", err) + } +} + +func TestRespondAcceptsNullResponse(t *testing.T) { + err := Respond(nil) + if err != nil { + t.Fatalf("autorest: Respond returned an unexpected error when given a null Response (%v)", err) + } +} + +func TestWithErrorUnlessStatusCodeOKResponse(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + ByUnmarshallingJSON(v), + ByClosing()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) failed on okay response. (%v)", err) + } + + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) corrupted the response body of okay response.") + } +} + +func TesWithErrorUnlessStatusCodeErrorResponse(t *testing.T) { + v := &mocks.T{} + e := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + ByUnmarshallingJSON(v), + ByClosing()) + + if err == nil { + t.Fatal("autorest: WithErrorUnlessStatusCode(http.StatusOK) did not return error, on a response to a bad request.") + } + + var errorRespBody []byte + if derr, ok := err.(DetailedError); !ok { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) got wrong error type : %T, expected: DetailedError, on a response to a bad request.", err) + } else { + errorRespBody = derr.ServiceError + } + + if errorRespBody == nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) ServiceError not returned in DetailedError on a response to a bad request.") + } + + err = json.Unmarshal(errorRespBody, e) + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) cannot parse error returned in ServiceError into json. 
%v", err) + } + + expected := &mocks.T{Name: "Rob Pike", Age: 42} + if e != expected { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK wrong value from parsed ServiceError: got=%#v expected=%#v", e, expected) + } +} + +func TestWithErrorUnlessStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status) + } +} + +func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status) + } +} + +func TestWithErrorUnlessOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err) + } +} + +func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err) + } +} + +func TestExtractHeader(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", 
"v2", "v3"} + mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderHandlesMissingHeader(t *testing.T) { + var v []string + r := mocks.NewResponse() + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v", + v, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValue(t *testing.T) { + r := mocks.NewResponse() + v := "v1" + mocks.SetResponseHeader(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) { + r := mocks.NewResponse() + v := "" + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", "v2", "v3"} + mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v[0] { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender.go 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 000000000000..a12f0f7ff551 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,269 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math" + "net/http" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. 
It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(&http.Client{}, r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. +func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Cancel) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. 
+func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. 
Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequest(resp, r.Cancel) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + for attempt := 0; attempt < attempts; attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + b := []byte{} + if r.Body != nil { + b, err = ioutil.ReadAll(r.Body) + if err != nil { + return resp, err + } + } + + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; attempt++ { + r.Body = ioutil.NopCloser(bytes.NewBuffer(b)) + resp, err = s.Do(r) + if err != nil || !ResponseHasStatusCode(resp, codes...) { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. +func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + resp, err = s.Do(r) + if err == nil { + return resp, err + } + DelayForBackoff(backoff, attempt, r.Cancel) + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. 
+func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender_test.go new file mode 100644 index 000000000000..4c462dda6a51 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/sender_test.go @@ -0,0 +1,734 @@ +package autorest + +import ( + "bytes" + "fmt" + "log" + "net/http" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleSendWithSender() { + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 10) + + logger := log.New(os.Stdout, "autorest: ", 0) + na := NullAuthorizer{} + + req, _ := 
Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/a/b/c/"), + na.WithAuthorization()) + + r, _ = SendWithSender(client, req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusAccepted), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + // Output: + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted +} + +func ExampleDoRetryForAttempts() { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + // Retry with backoff -- ensure returned Bodies are closed + r, _ := SendWithSender(client, mocks.NewRequest(), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts", client.Attempts()) + // Output: Retry stopped after 5 attempts +} + +func ExampleDoErrorIfStatusCode() { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 NoContent", http.StatusNoContent), 10) + + // Chain decorators to retry the request, up to five times, if the status code is 204 + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusNoContent), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) + // Output: Retry stopped after 5 attempts 
with code 204 NoContent +} + +func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { + client := mocks.NewSender() + s := "" + + r, err := SendWithSender(client, mocks.NewRequest(), + withMessage(&s, "a"), + withMessage(&s, "b"), + withMessage(&s, "c")) + if err != nil { + t.Fatalf("autorest: SendWithSender returned an error (%v)", err) + } + + Respond(r, + ByClosing()) + + if s != "abc" { + t.Fatalf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) + } +} + +func TestCreateSender(t *testing.T) { + f := false + + s := CreateSender( + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + s.Do(&http.Request{}) + + if !f { + t.Fatal("autorest: CreateSender failed to apply supplied decorator") + } +} + +func TestSend(t *testing.T) { + f := false + + Send(&http.Request{}, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + + if !f { + t.Fatal("autorest: Send failed to apply supplied decorator") + } +} + +func TestAfterDelayWaits(t *testing.T) { + client := mocks.NewSender() + + d := 2 * time.Second + + tt := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + s := time.Since(tt) + if s < d { + t.Fatal("autorest: AfterDelay failed to wait for at least the specified duration") + } + + Respond(r, + ByClosing()) +} + +func TestAfterDelay_Cancels(t *testing.T) { + client := mocks.NewSender() + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + tt := time.Now() + go func() { + req := mocks.NewRequest() + req.Cancel = cancel + wg.Done() + SendWithSender(client, req, + AfterDelay(delay)) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(tt) >= delay { + t.Fatal("autorest: AfterDelay 
failed to cancel") + } +} + +func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { + client := mocks.NewSender() + + d := 5 * time.Millisecond + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: AfterDelay waited too long (exceeded 5 times specified duration)") + } + + Respond(r, + ByClosing()) +} + +func TestAsIs(t *testing.T) { + client := mocks.NewSender() + + r1 := mocks.NewResponse() + client.AppendResponse(r1) + + r2, err := SendWithSender(client, mocks.NewRequest(), + AsIs()) + if err != nil { + t.Fatalf("autorest: AsIs returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) + } + + Respond(r1, + ByClosing()) + Respond(r2, + ByClosing()) +} + +func TestDoCloseIfError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected DoCloseIfError to close response body -- it was left open") + } + + Respond(r, + ByClosing()) +} + +func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + return nil, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, 
err := s.Do(r) + if err != nil { + resp.Body.Close() + } + resp.Body = nil + return resp, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoErrorIfStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to emit an error for passed code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to ignore a status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorUnlessStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorUnlessStatusCode emitted an error for a knonwn status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, 
mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } + if err != nil { + t.Fatalf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err) + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + Respond(r, + ByClosing()) + + if client.Attempts() != 5 { + t.Fatal("autorest: DoRetryForAttempts failed to stop after specified number of attempts") + } +} + +func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(1, time.Duration(0))) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForAttempts failed to return the underlying response") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } + if err != nil { + t.Fatalf("autorest: DoRetryForDuration returned an unexpected error (%v)", err) + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + 
d := 5 * time.Millisecond + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) < d { + t.Fatal("autorest: DoRetryForDuration failed stopped too soon") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationStopsWithinReason(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + d := 5 * time.Second + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: DoRetryForDuration failed stopped soon enough (exceeded 5 times specified duration)") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForDurationReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForDuration failed to return the underlying response") + } + + Respond(r, + ByClosing()) +} + +func TestDelayForBackoff(t *testing.T) { + d := 2 * time.Second + start := time.Now() + DelayForBackoff(d, 0, nil) + if time.Since(start) < d { + t.Fatal("autorest: DelayForBackoff did not delay as long as expected") + } +} + +func TestDelayForBackoff_Cancels(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + DelayForBackoff(delay, 0, cancel) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if 
time.Since(start) >= delay { + t.Fatal("autorest: DelayForBackoff failed to cancel") + } +} + +func TestDelayForBackoffWithinReason(t *testing.T) { + d := 5 * time.Second + maxCoefficient := 2 + start := time.Now() + DelayForBackoff(d, 0, nil) + if time.Since(start) > (time.Duration(maxCoefficient) * d) { + + t.Fatalf("autorest: DelayForBackoff delayed too long (exceeded %d times the specified duration)", maxCoefficient) + } +} + +func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Duration(0), time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes polled for unspecified status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() != 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to poll for specified status code") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r := mocks.NewResponse() + mocks.SetAcceptedHeaders(r) + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 100) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + Respond(r, + ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to cancel") + } +} + +func 
TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + resp := newAcceptedResponse() + + client := mocks.NewSender() + client.AppendAndRepeatResponse(resp, 2) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if resp.Body.(*mocks.Body).IsOpen() || resp.Body.(*mocks.Body).CloseAttempts() < 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not close unreturned response bodies") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not leave open the body of the last response") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() > 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to stop polling after receiving an error") + } + + Respond(r, + ByClosing()) +} + +func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if err == nil { + t.Fatalf("autorest: Sender#DoPollForStatusCodes 
failed to return error from polling") + } + + Respond(r, + ByClosing()) +} + +func TestWithLogging_Logs(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + + r, _ := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request") + } + + Respond(r, + ByClosing()) +} + +func TestWithLogging_HandlesMissingResponse(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if r != nil || err == nil { + t.Fatal("autorest: Sender#WithLogging returned a valid response -- expecting nil") + } + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request for a nil response") + } + + Respond(r, + ByClosing()) +} + +func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("408 Request Timeout", http.StatusRequestTimeout), 2) + client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(5, time.Duration(2*time.Second), http.StatusRequestTimeout), + ) + + Respond(r, + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", + r.Status, client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("504 Gateway Timeout", http.StatusGatewayTimeout), 5) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(2, time.Duration(2*time.Second), 
http.StatusGatewayTimeout), + ) + Respond(r, + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: failed stop after %v retry attempts; Want: Stop after 2 retry attempts", + client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByClosing()) + + if client.Attempts() != 1 || r.Status != "204 No Content" { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Retry attempts %v for StatusCode %v; Want: 0 attempts for StatusCode 204", + client.Attempts(), r.Status) + } +} + +func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 2) + + r, err := SendWithSender(client, mocks.NewRequestWithCloseBody(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByClosing()) + + if err == nil || client.Attempts() != 0 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Not failed for request body read error; Want: Failed for body read error - %v", err) + } +} + +func newAcceptedResponse() *http.Response { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + return resp +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 000000000000..7b180b866b90 --- /dev/null +++ 
b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,133 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func StringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} + +// StringSlicePtr returns a pointer to the passed string slice. +func StringSlicePtr(s []string) *[]string { + return &s +} + +// StringMap returns a map of strings built from the map of string pointers. The empty string is +// used for nil pointers. +func StringMap(msp map[string]*string) map[string]string { + ms := make(map[string]string, len(msp)) + for k, sp := range msp { + if sp != nil { + ms[k] = *sp + } else { + ms[k] = "" + } + } + return ms +} + +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. +func StringMapPtr(ms map[string]string) *map[string]*string { + msp := make(map[string]*string, len(ms)) + for k, s := range ms { + msp[k] = StringPtr(s) + } + return &msp +} + +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. +func Bool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +// BoolPtr returns a pointer to the passed bool. +func BoolPtr(b bool) *bool { + return &b +} + +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int(i *int) int { + if i != nil { + return *i + } + return 0 +} + +// IntPtr returns a pointer to the passed int. 
+func IntPtr(i int) *int { + return &i +} + +// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int32(i *int32) int32 { + if i != nil { + return *i + } + return 0 +} + +// Int32Ptr returns a pointer to the passed int32. +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int64(i *int64) int64 { + if i != nil { + return *i + } + return 0 +} + +// Int64Ptr returns a pointer to the passed int64. +func Int64Ptr(i int64) *int64 { + return &i +} + +// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float32(i *float32) float32 { + if i != nil { + return *i + } + return 0.0 +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float64(i *float64) float64 { + if i != nil { + return *i + } + return 0.0 +} + +// Float64Ptr returns a pointer to the passed float64. 
+func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go new file mode 100644 index 000000000000..8c983539251b --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go @@ -0,0 +1,220 @@ +package to + +import ( + "reflect" + "testing" +) + +func TestString(t *testing.T) { + v := "" + if String(&v) != v { + t.Fatalf("to: String failed to return the correct string -- expected %v, received %v", + v, String(&v)) + } +} + +func TestStringHandlesNil(t *testing.T) { + if String(nil) != "" { + t.Fatalf("to: String failed to correctly convert nil -- expected %v, received %v", + "", String(nil)) + } +} + +func TestStringPtr(t *testing.T) { + v := "" + if *StringPtr(v) != v { + t.Fatalf("to: StringPtr failed to return the correct string -- expected %v, received %v", + v, *StringPtr(v)) + } +} + +func TestStringSlice(t *testing.T) { + v := []string{} + if out := StringSlice(&v); !reflect.DeepEqual(out, v) { + t.Fatalf("to: StringSlice failed to return the correct slice -- expected %v, received %v", + v, out) + } +} + +func TestStringSliceHandlesNil(t *testing.T) { + if out := StringSlice(nil); out != nil { + t.Fatalf("to: StringSlice failed to correctly convert nil -- expected %v, received %v", + nil, out) + } +} + +func TestStringSlicePtr(t *testing.T) { + v := []string{"a", "b"} + if out := StringSlicePtr(v); !reflect.DeepEqual(*out, v) { + t.Fatalf("to: StringSlicePtr failed to return the correct slice -- expected %v, received %v", + v, *out) + } +} + +func TestStringMap(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if *msp[k] != v { + t.Fatalf("to: StringMap incorrectly converted an entry 
-- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapHandlesNil(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if msp[k] == nil && v != "" { + t.Fatalf("to: StringMap incorrectly converted a nil entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapPtr(t *testing.T) { + ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"} + for k, msp := range *StringMapPtr(ms) { + if ms[k] != *msp { + t.Fatalf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, ms[k], k, *msp) + } + } +} + +func TestBool(t *testing.T) { + v := false + if Bool(&v) != v { + t.Fatalf("to: Bool failed to return the correct string -- expected %v, received %v", + v, Bool(&v)) + } +} + +func TestBoolHandlesNil(t *testing.T) { + if Bool(nil) != false { + t.Fatalf("to: Bool failed to correctly convert nil -- expected %v, received %v", + false, Bool(nil)) + } +} + +func TestBoolPtr(t *testing.T) { + v := false + if *BoolPtr(v) != v { + t.Fatalf("to: BoolPtr failed to return the correct string -- expected %v, received %v", + v, *BoolPtr(v)) + } +} + +func TestInt(t *testing.T) { + v := 0 + if Int(&v) != v { + t.Fatalf("to: Int failed to return the correct string -- expected %v, received %v", + v, Int(&v)) + } +} + +func TestIntHandlesNil(t *testing.T) { + if Int(nil) != 0 { + t.Fatalf("to: Int failed to correctly convert nil -- expected %v, received %v", + 0, Int(nil)) + } +} + +func TestIntPtr(t *testing.T) { + v := 0 + if *IntPtr(v) != v { + t.Fatalf("to: IntPtr failed to return the correct string -- expected %v, received %v", + v, *IntPtr(v)) + } +} + +func TestInt32(t *testing.T) { + v := int32(0) + if Int32(&v) != v { + t.Fatalf("to: Int32 failed to return the correct string -- expected %v, received %v", + v, Int32(&v)) + } +} + +func TestInt32HandlesNil(t *testing.T) { + if 
Int32(nil) != int32(0) { + t.Fatalf("to: Int32 failed to correctly convert nil -- expected %v, received %v", + 0, Int32(nil)) + } +} + +func TestInt32Ptr(t *testing.T) { + v := int32(0) + if *Int32Ptr(v) != v { + t.Fatalf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", + v, *Int32Ptr(v)) + } +} + +func TestInt64(t *testing.T) { + v := int64(0) + if Int64(&v) != v { + t.Fatalf("to: Int64 failed to return the correct string -- expected %v, received %v", + v, Int64(&v)) + } +} + +func TestInt64HandlesNil(t *testing.T) { + if Int64(nil) != int64(0) { + t.Fatalf("to: Int64 failed to correctly convert nil -- expected %v, received %v", + 0, Int64(nil)) + } +} + +func TestInt64Ptr(t *testing.T) { + v := int64(0) + if *Int64Ptr(v) != v { + t.Fatalf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", + v, *Int64Ptr(v)) + } +} + +func TestFloat32(t *testing.T) { + v := float32(0) + if Float32(&v) != v { + t.Fatalf("to: Float32 failed to return the correct string -- expected %v, received %v", + v, Float32(&v)) + } +} + +func TestFloat32HandlesNil(t *testing.T) { + if Float32(nil) != float32(0) { + t.Fatalf("to: Float32 failed to correctly convert nil -- expected %v, received %v", + 0, Float32(nil)) + } +} + +func TestFloat32Ptr(t *testing.T) { + v := float32(0) + if *Float32Ptr(v) != v { + t.Fatalf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", + v, *Float32Ptr(v)) + } +} + +func TestFloat64(t *testing.T) { + v := float64(0) + if Float64(&v) != v { + t.Fatalf("to: Float64 failed to return the correct string -- expected %v, received %v", + v, Float64(&v)) + } +} + +func TestFloat64HandlesNil(t *testing.T) { + if Float64(nil) != float64(0) { + t.Fatalf("to: Float64 failed to correctly convert nil -- expected %v, received %v", + 0, Float64(nil)) + } +} + +func TestFloat64Ptr(t *testing.T) { + v := float64(0) + if *Float64Ptr(v) != v { + t.Fatalf("to: Float64Ptr failed to return the 
correct string -- expected %v, received %v", + v, *Float64Ptr(v)) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000000..78067148b28d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,178 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/url" + "reflect" + "sort" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. 
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using separator. 
+func String(v interface{}, sep ...string) string { + if len(sep) > 0 { + return ensureValueString(strings.Join(v.([]string), sep[0])) + } + return ensureValueString(v) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// This method is same as Encode() method of "net/url" go package, +// except it does not encode the query parameters because they +// already come encoded. It formats values map in query format (bar=foo&a=b). +func createQuery(v url.Values) string { + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(v) + } + } + return buf.String() +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility_test.go new file mode 100644 index 000000000000..99c16c97c7f4 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/utility_test.go @@ -0,0 +1,368 @@ +package autorest + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "reflect" + "sort" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + jsonT = ` + { + "name":"Rob Pike", + "age":42 + }` + xmlT = ` + + Rob Pike + 42 + ` +) + +func 
TestNewDecoderCreatesJSONDecoder(t *testing.T) { + d := NewDecoder(EncodedAsJSON, strings.NewReader(jsonT)) + _, ok := d.(*json.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create a JSON decoder when requested") + } +} + +func TestNewDecoderCreatesXMLDecoder(t *testing.T) { + d := NewDecoder(EncodedAsXML, strings.NewReader(xmlT)) + _, ok := d.(*xml.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create an XML decoder when requested") + } +} + +func TestNewDecoderReturnsNilForUnknownEncoding(t *testing.T) { + d := NewDecoder("unknown", strings.NewReader(xmlT)) + if d != nil { + t.Fatal("autorest: NewDecoder created a decoder for an unknown encoding") + } +} + +func TestCopyAndDecodeDecodesJSON(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid JSON - %v", err) + } +} + +func TestCopyAndDecodeDecodesXML(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid XML - %v", err) + } +} + +func TestCopyAndDecodeReturnsJSONDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT[0:len(jsonT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid JSON") + } +} + +func TestCopyAndDecodeReturnsXMLDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT[0:len(xmlT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid XML") + } +} + +func TestCopyAndDecodeAlwaysReturnsACopy(t *testing.T) { + b, _ := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if b.String() != jsonT { + t.Fatalf("autorest: CopyAndDecode failed to return a valid copy of the data - %v", b.String()) + } +} + 
+func TestTeeReadCloser_Copies(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + r.Body = TeeReadCloser(r.Body, b) + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + t.Fatalf("autorest: TeeReadCloser failed to copy the bytes read") + } +} + +func TestTeeReadCloser_PassesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + r.Body.(*mocks.Body).Close() + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: TeeReadCloser failed to return the expected error") + } +} + +func TestTeeReadCloser_ClosesWrappedReader(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + b := r.Body.(*mocks.Body) + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.IsOpen() { + t.Fatalf("autorest: TeeReadCloser failed to close the nested io.ReadCloser") + } +} + +func TestContainsIntFindsValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 5 + if !containsInt(ints, v) { + t.Fatalf("autorest: containsInt failed to find %v in %v", v, ints) + } +} + +func TestContainsIntDoesNotFindValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 42 + if containsInt(ints, v) { + t.Fatalf("autorest: containsInt unexpectedly found %v in %v", v, ints) + } +} + +func TestContainsIntAcceptsEmptyList(t *testing.T) { + ints := make([]int, 10) + if containsInt(ints, 42) { + t.Fatalf("autorest: containsInt failed to handle an empty list") + } +} + +func TestContainsIntAcceptsNilList(t *testing.T) { + var ints []int + if containsInt(ints, 42) { + 
t.Fatalf("autorest: containsInt failed to handle an nil list") + } +} + +func TestEscapeStrings(t *testing.T) { + m := map[string]string{ + "string": "a long string with = odd characters", + "int": "42", + "nil": "", + } + r := map[string]string{ + "string": "a+long+string+with+%3D+odd+characters", + "int": "42", + "nil": "", + } + v := escapeValueStrings(m) + if !reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func TestEnsureStrings(t *testing.T) { + m := map[string]interface{}{ + "string": "string", + "int": 42, + "nil": nil, + "bytes": []byte{255, 254, 253}, + } + r := map[string]string{ + "string": "string", + "int": "42", + "nil": "", + "bytes": string([]byte{255, 254, 253}), + } + v := ensureValueStrings(m) + if !reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func ExampleString() { + m := []string{ + "string1", + "string2", + "string3", + } + + fmt.Println(String(m, ",")) + // Output: string1,string2,string3 +} + +func TestStringWithValidString(t *testing.T) { + i := 123 + if String(i) != "123" { + t.Fatal("autorest: String method failed to convert integer 123 to string") + } +} + +func TestEncodeWithValidPath(t *testing.T) { + s := Encode("Path", "Hello Gopher") + if s != "Hello%20Gopher" { + t.Fatalf("autorest: Encode method failed for valid path encoding. Got: %v; Want: %v", s, "Hello%20Gopher") + } +} + +func TestEncodeWithValidQuery(t *testing.T) { + s := Encode("Query", "Hello Gopher") + if s != "Hello+Gopher" { + t.Fatalf("autorest: Encode method failed for valid query encoding. Got: '%v'; Want: 'Hello+Gopher'", s) + } +} + +func TestEncodeWithValidNotPathQuery(t *testing.T) { + s := Encode("Host", "Hello Gopher") + if s != "Hello Gopher" { + t.Fatalf("autorest: Encode method failed for parameter not query or path. 
Got: '%v'; Want: 'Hello Gopher'", s) + } +} + +func TestMapToValues(t *testing.T) { + m := map[string]interface{}{ + "a": "a", + "b": 2, + } + v := url.Values{} + v.Add("a", "a") + v.Add("b", "2") + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +func TestMapToValuesWithArrayValues(t *testing.T) { + m := map[string]interface{}{ + "a": []string{"a", "b"}, + "b": 2, + "c": []int{3, 4}, + } + v := url.Values{} + v.Add("a", "a") + v.Add("a", "b") + v.Add("b", "2") + v.Add("c", "3") + v.Add("c", "4") + + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +func isEqual(v, u url.Values) bool { + for key, value := range v { + if len(u[key]) == 0 { + return false + } + sort.Strings(value) + sort.Strings(u[key]) + for i := range value { + if value[i] != u[key][i] { + return false + } + } + u.Del(key) + } + if len(u) > 0 { + return false + } + return true +} + +func doEnsureBodyClosed(t *testing.T) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected Body to be closed -- it was left open") + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi 
*mockInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} + +func withMessage(output *string, msg string) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + *output += msg + } + return resp, err + }) + } +} + +func withErrorRespondDecorator(e *error) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil { + return err + } + *e = fmt.Errorf("autorest: Faux Respond Error") + return *e + }) + } +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go new file mode 100644 index 000000000000..d7b0eadc5542 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -0,0 +1,373 @@ +/* +Package validation provides methods for validating parameter value using reflection. +*/ +package validation + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type Constraint struct { + + // Target field name for validation. + Target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + Name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. 
+ Rule interface{} + + // Chain Validations for struct type + Chain []Constraint +} + +// Validation stores parameter-wise validation. +type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + if err := Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }); err != nil { + return err + } + 
return nil +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := v.Rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, 
v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, 
v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + 
return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +// NewErrorWithValidationError appends package type and method name in +// validation error. 
+func NewErrorWithValidationError(err error, packageType, method string) error { + return fmt.Errorf("%s#%s: Invalid input: %v", packageType, method, err) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go new file mode 100644 index 000000000000..fdf8228463a5 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go @@ -0,0 +1,2417 @@ +package validation + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCheckForUniqueInArrayTrue(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3})), true) +} + +func TestCheckForUniqueInArrayFalse(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3, 3})), false) +} + +func TestCheckForUniqueInArrayEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{})), false) +} + +func TestCheckForUniqueInMapTrue(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[string]int{"one": 1, "two": 2})), true) +} + +func TestCheckForUniqueInMapFalse(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{1: "one", 2: "one"})), false) +} + +func TestCheckForUniqueInMapEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{})), false) +} + +func TestCheckEmpty_WithValueEmptyRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithEmptyStringRuleFalse(t 
*testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, checkEmpty(reflect.ValueOf(x), v)) +} + +func TestCheckEmpty_IncorrectRule(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithErrorArray(t *testing.T) { + var x interface{} = []string{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"x", MaxItems, 4, nil}, + }, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null; required parameter") + require.Equal(t, checkNil(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleFalse(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"x", MaxItems, 4, nil}, + }, + } + require.Nil(t, checkNil(reflect.ValueOf(x), v)) +} + +func TestCheckNil_IncorrectRule(t *testing.T) { + var x interface{} + c := Constraint{ + Target: "str", + Name: Null, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, checkNil(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_WithNilValueRuleTrue(t *testing.T) { + var a []string + var x interface{} = a + 
c := Constraint{ + Target: "arr", + Name: Null, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null; required parameter") + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithNilValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithValueRuleNullTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyValueRuleTrue(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null or empty; required parameter") + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithEmptyValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyRuleEmptyTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), 
expected.Error()) +} + +func TestValidateArrayMap_MaxItemsNoError(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsWithError(t *testing.T) { + var x interface{} = []string{"1", "2", "3"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("maximum item limit is %v; got: 3", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MaxItemsWithEmpty(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsNoError1(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2}), c)) +} + +func TestValidateArrayMap_MinItemsNoError2(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2, 3}), c)) +} + +func TestValidateArrayMap_MinItemsWithError(t *testing.T) { + var x interface{} = []int{1} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; 
got: 1", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsWithEmpty(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: 0", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_Map_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateArrayMap_Map_MaxItemsNoError(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MaxItemsWithError(t *testing.T) { + a := map[int]string{1: "1", 2: "2", 3: "3"} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", c.Rule, len(a))), true) +} + +func TestValidateArrayMap_Map_MaxItemsWithEmpty(t *testing.T) { + a := map[int]string{} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + require.Equal(t, 
strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateArrayMap_Map_MinItemsNoError1(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + require.Nil(t, validateArrayMap(reflect.ValueOf(x), + Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + })) +} + +func TestValidateArrayMap_Map_MinItemsNoError2(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2", 3: "3"} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MinItemsWithError(t *testing.T) { + a := map[int]string{1: "1"} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a))) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_Map_MinItemsWithEmpty(t *testing.T) { + a := map[int]string{} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a))) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +// func TestValidateArrayMap_Map_MinItemsNil(t *testing.T) { +// var a map[int]float64 +// var x interface{} = a +// c := Constraint{ +// Target: "str", +// Name: MinItems, +// Rule: true, +// Chain: nil, +// } +// expected := createError(reflect.Value(x), c, fmt.Sprintf("all items in parameter %v must be unique; got:%v", c.Target, x)) +// if z := validateArrayMap(reflect.ValueOf(x), c); strings.Contains(z.Error(), "all items in parameter str must be unique;") { +// t.Fatalf("autorest/validation: 
valiateArrayMap failed to return error \nexpect: %v;\ngot: %v", expected, z) +// } +// } + +func TestValidateArrayMap_Map_UniqueItemsTrue(t *testing.T) { + var x interface{} = map[float64]int{1.2: 1, 1.4: 2} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_UniqueItemsFalse(t *testing.T) { + var x interface{} = map[string]string{"1": "1", "2": "2", "3": "1"} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true) +} + +func TestValidateArrayMap_Map_UniqueItemsEmpty(t *testing.T) { + // Consider Empty map as not unique returns false + var x interface{} = map[int]float64{} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true) +} + +func TestValidateArrayMap_Map_UniqueItemsNil(t *testing.T) { + var a map[int]float64 + var x interface{} = a + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsTrue(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Array_UniqueItemsFalse(t *testing.T) { + var x interface{} = []string{"1", "2", "1"} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + 
Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsEmpty(t *testing.T) { + // Consider Empty array as not unique returns false + var x interface{} = []float64{} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsNil(t *testing.T) { + // Consider nil array as not unique returns false + var a []float64 + var x interface{} = a + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidType(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", c.Name, reflect.ValueOf(x).Kind())), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidConstraint(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: "sdad", + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("constraint %v is not applicable to array, slice and map type", c.Name)), true) +} + +func TestValidateArrayMap_ValidateChainConstraint1(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + 
Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint2(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint3(t *testing.T) { + var a []string + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraint4(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraintNilNotRequired(t *testing.T) { + var a []int + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ValidateChainConstraintEmptyNotRequired(t *testing.T) { + var x interface{} = map[string]int{} + c := Constraint{ + Target: "str", + Name: Empty, + 
Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ReadOnlyWithError(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateArrayMap_ReadOnlyWithoutError(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateString_ReadOnly(t *testing.T) { + var x interface{} = "Hello Gopher" + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateString_EmptyTrue(t *testing.T) { + // Empty true means parameter is required but Empty returns error + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(""), c).Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateString_EmptyFalse(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + var x interface{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf(x), c)) +} + +func TestValidateString_MaxLengthInvalid(t *testing.T) { + // Empty true means parameter is required but Empty returns error + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + 
Rule: 4, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be less than %v", c.Rule)), true) +} + +func TestValidateString_MaxLengthValid(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: 7, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MaxLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: true, // must be int for maxLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateString_MinLengthInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 10, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be greater than %v", c.Rule)), true) +} + +func TestValidateString_MinLengthValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MinLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: true, // must be int for minLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateString_PatternInvalidPattern(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[[:alnum:$`, + Chain: nil, + } + require.Equal(t, 
strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + "error parsing regexp: missing closing ]"), true) +} + +func TestValidateString_PatternMatch1(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^http://\w+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("http://masd"), c)) +} + +func TestValidateString_PatternMatch2(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("asdadad2323sad"), c)) +} + +func TestValidateString_PatternNotMatch(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value doesn't match pattern %v", c.Rule)), true) +} + +func TestValidateString_InvalidConstraint(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: "^[a-zA-Z0-9]+$", + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable to string type", c.Name)), true) +} + +func TestValidateFloat_InvalidConstraint(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3.0, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %v is not applicable for type float", c.Name)), true) +} + +func TestValidateFloat_InvalidRuleValue(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be float value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func 
TestValidateFloat_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.5, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + +func TestValidateFloat_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + +func TestValidateFloat_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + 
require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.5, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateInt_InvalidConstraint(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable for type integer", c.Name)), true) +} + +func TestValidateInt_InvalidRuleValue(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3.4, + Chain: nil, + } + require.Equal(t, 
strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateInt_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(3), c)) +} + +func TestValidateInt_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", 
c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 2, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_MultipleOfWithoutError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 10, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(2300), c)) +} + +func TestValidateInt_MultipleOfWithError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 11, + Chain: nil, + } + 
require.Equal(t, validateInt(reflect.ValueOf(2300), c).Error(), + createError(reflect.ValueOf(2300), c, fmt.Sprintf("value must be a multiple of %v", c.Rule)).Error()) +} + +func TestValidatePointer_NilTrue(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, // Required property + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, "value can not be null; required parameter").Error()) +} + +func TestValidatePointer_NilFalse(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: false, // not required property + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyValid(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c, "readonly parameter; must send as nil or empty in request").Error()) +} + +func TestValidatePointer_IntValid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_IntInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 11, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], "value must be 
greater than or equal to 11").Error()) +} + +func TestValidatePointer_IntInvalidConstraint(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + }, + }} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidConstraintInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3.0, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 12.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + 
"value must be greater than or equal to 12").Error()) +} + +func TestValidatePointer_InvalidConstraintFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type float", MaxItems)).Error()) +} + +func TestValidatePointer_StringValid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: Pattern, + Rule: "^[a-z]+$", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_StringInvalid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxLength, + Rule: 2, + Chain: nil, + }}} + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("hello"), c.Chain[0], + "value length must be less than 2").Error()) +} + +func TestValidatePointer_ArrayValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: "true", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&[]string{"1", "2"}), c)) +} + +func TestValidatePointer_ArrayInvalid(t *testing.T) { + z := []string{"1", "2", "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c.Chain[0], + fmt.Sprintf("all items in parameter %q must be unique; 
got:%v", c.Target, z)).Error()) +} + +func TestValidatePointer_MapValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&map[interface{}]string{1: "1", "1": "2"}), c)) +} + +func TestValidatePointer_MapInvalid(t *testing.T) { + z := map[interface{}]string{1: "1", "1": "2", 1.3: "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + require.Equal(t, strings.Contains(validatePtr(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", c.Target)), true) +} + +type Child struct { + I string +} +type Product struct { + C *Child + Str *string + Name string + Arr *[]string + M *map[string]string + Num *int32 +} + +type Sample struct { + M *map[string]*string + Name string +} + +func TestValidatePointer_StructWithError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, "True", + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], + "value length must be less than 2").Error()) +} + +func TestValidatePointer_WithNilStruct(t *testing.T) { + var p *Product + var x interface{} = p + c := Constraint{ + "p", Null, true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, + 
fmt.Sprintf("value can not be null; required parameter")).Error()) +} + +func TestValidatePointer_StructWithNoError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + }, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_FieldNotExist(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"Name", Empty, true, nil}, + }, + } + s = "Name" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(Child{"100"}), c.Chain[0], + fmt.Sprintf("field %q doesn't exist", s)).Error()) +} + +func TestValidateStruct_WithChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], "value length must be less than 2").Error()) +} + +func TestValidateStruct_WithoutChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + } + c := Constraint{"C", Null, true, + []Constraint{ + {"I", Empty, true, nil}, // throw error for Empty + }} + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(""), c.Chain[0], "value can not be null or empty; required parameter").Error()) +} + +func TestValidateStruct_WithArrayNull(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + Arr: nil, + } + c := 
Constraint{"Arr", Null, true, + []Constraint{ + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x.(Product).Arr), c, "value can not be null; required parameter").Error()) +} + +func TestValidateStruct_WithArrayEmptyError(t *testing.T) { + // arr := []string{} + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }} + + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[0], + fmt.Sprintf("value can not be null or empty; required parameter")).Error()) +} + +func TestValidateStruct_WithArrayEmptyWithoutError(t *testing.T) { + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, false, nil}, + {"Arr", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_ArrayWithError(t *testing.T) { + arr := []string{"1", "1"} + var x interface{} = Product{ + Arr: &arr, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + } + s := "Arr" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, *(x.(Product).Arr))).Error()) +} + +func TestValidateStruct_MapWithError(t *testing.T) { + m := map[string]string{ + "a": "hello", + "b": "hello", + } + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, true, nil}, + {"M", MaxItems, 4, nil}, + {"M", UniqueItems, true, nil}, + }, + } + + s := "M" + require.Equal(t, 
strings.Contains(validateStruct(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", s)), true) +} + +func TestValidateStruct_MapWithNoError(t *testing.T) { + m := map[string]string{} + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_MapNilNoError(t *testing.T) { + var m map[string]string + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidate_MapValidationWithError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 2, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + + z := Validate(v).Error() + require.Equal(t, strings.Contains(z, "minimum item limit is 2; got: 1"), true) + require.Equal(t, strings.Contains(z, "MinItems"), true) +} + +func TestValidate_MapValidationWithoutError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: 
&map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_UnknownType(t *testing.T) { + var c chan int + v := []Validation{ + {c, + []Constraint{{"c", Null, true, nil}}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(c), v[0].Constraints[0], + fmt.Sprintf("unknown type %v", reflect.ValueOf(c).Kind())).Error()) +} + +func TestValidate_example1(t *testing.T) { + var x1 interface{} = Product{ + Arr: &[]string{"1", "1"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }}, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }}, + {x2, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + s = "Arr" + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{"1", "1"}), v[0].Constraints[0].Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, []string{"1", "1"})).Error()) +} + +func TestValidate_Int(t 
*testing.T) { + n := int32(100) + v := []Validation{ + {n, + []Constraint{ + {"n", MultipleOf, 10, nil}, + {"n", ExclusiveMinimum, 100, nil}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[1], + "value must be greater than 100").Error()) +} + +func TestValidate_IntPointer(t *testing.T) { + n := int32(100) + p := &n + v := []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(v[0].TargetValue), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // Not required + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, false, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_IntStruct(t *testing.T) { + n := int32(100) + p := &Product{ + Num: &n, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, true, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Num", Null, true, []Constraint{ + {"p.Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Num), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + 
+ // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_String(t *testing.T) { + s := "hello" + v := []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1].Chain[0], + "value length must be less than 3").Error()) + + // required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1], + "value can not be null or empty; required parameter").Error()) + + // not required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, false, nil}, + {"s", Empty, false, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StringStruct(t *testing.T) { + s := "hello" + p := &Product{ + Str: &s, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + // e := ValidationError{ + // Constraint: MaxLength, + // Target: "Str", + // TargetValue: s, + // Details: fmt.Sprintf("value length must be less than 3", s), + // } + // if z := Validate(v); !reflect.DeepEqual(e, z) { + // t.Fatalf("autorest/validation: Validate failed to return error \nexpect: 
%v\ngot: %v", e, z) + // } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + "value length must be less than 3").Error()) + + // required paramter - can't be Empty + s = "" + p = &Product{ + Str: &s, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Str), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, false, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_Array(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = 
[]Validation{ + {[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 []string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayPointer(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {&s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = []Validation{ + {&[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 *[]string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not 
required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayInStruct(t *testing.T) { + s := []string{"hello"} + p := &Product{ + Arr: &s, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // required paramter - can't be Empty + p = &Product{ + Arr: &[]string{}, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Arr), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{ + {"Arr", Null, false, []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Arr", Null, true, []Constraint{ + {"Arr", Empty, true, nil}, + 
{"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StructInStruct(t *testing.T) { + p := &Product{ + C: &Child{I: "hello"}, + } + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", MinLength, 7, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value length must be greater than 7").Error()) + + // required paramter - can't be Empty + p = &Product{ + C: &Child{I: ""}, + } + + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestNewErrorWithValidationError(t *testing.T) { + p := &Product{} + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, true, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + err := createError(reflect.ValueOf(p.C), 
v[0].Constraints[0].Chain[0], "value can not be null; required parameter") + z := fmt.Sprintf("batch.AccountClient#Create: Invalid input: %s", + err.Error()) + require.Equal(t, NewErrorWithValidationError(err, "batch.AccountClient", "Create").Error(), z) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 000000000000..8031a332cd2d --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,18 @@ +package autorest + +import ( + "fmt" +) + +const ( + major = "7" + minor = "0" + patch = "0" + tag = "" + semVerFormat = "%s.%s.%s%s" +) + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version_test.go b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version_test.go new file mode 100644 index 000000000000..14925fe4e53f --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor/github.com/Azure/go-autorest/autorest/version_test.go @@ -0,0 +1,13 @@ +package autorest + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + v := "7.0.0" + if Version() != v { + t.Fatalf("autorest: Version failed to return the expected version -- expected %s, received %s", + v, Version()) + } +} From 526bd5d44ed7db69a778a249d3fc426841bf6751 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 2 Oct 2017 16:40:30 +0200 Subject: [PATCH 24/27] UPSTREAM: : openapi generation for createNamespacedDeploymentConfigRollback duplication problem --- .../src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go index a057bae8377f..8b7d55030e82 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go @@ -63,6 +63,10 @@ func GetOperationIDAndTags(r *restful.Route) (string, []string, error) { op := r.Operation path := r.Path var tags []string + // FIXME: this is hacky way to get rid of conflict name + if strings.HasPrefix(path, "/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/rollback") { + op = op + "Rollback" + } prefix, exists := verbs.GetPrefix(op) if !exists { return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op) From 44f6988cdb0b9881180233cc57d57143309a3199 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Fri, 29 Sep 2017 18:35:17 +0200 Subject: [PATCH 25/27] OpenShift changes after the rebase to 1.7.6 --- pkg/assets/apiserver/asset_apiserver.go | 2 +- pkg/cmd/server/bootstrappolicy/policy.go | 1 + .../kubernetes/master/master_config_test.go | 1 + pkg/oauth/apiserver/oauth_apiserver.go | 2 +- test/integration/node_authorizer_test.go | 45 ++++++++++++++++++- .../bootstrap_cluster_roles.yaml | 6 +++ .../bootstrap_policy_file.yaml | 7 +++ 7 files changed, 60 insertions(+), 4 deletions(-) diff --git a/pkg/assets/apiserver/asset_apiserver.go b/pkg/assets/apiserver/asset_apiserver.go index 6b315e422ebb..b31e683b62f8 100644 --- a/pkg/assets/apiserver/asset_apiserver.go +++ b/pkg/assets/apiserver/asset_apiserver.go @@ -155,7 +155,7 @@ func buildHandlerChainForAssets(consoleRedirectPath string) func(startingHandler handler = genericapifilters.WithAudit(handler, c.RequestContextMapper, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) } handler = genericfilters.WithCORS(handler, 
c.CorsAllowedOriginList, nil, nil, nil, "true") - handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc) + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.RequestContextMapper, c.LongRunningFunc, c.RequestTimeout) handler = genericapifilters.WithRequestInfo(handler, genericapiserver.NewRequestInfoResolver(c), c.RequestContextMapper) handler = apirequest.WithRequestContext(handler, c.RequestContextMapper) handler = genericfilters.WithPanicRecovery(handler) diff --git a/pkg/cmd/server/bootstrappolicy/policy.go b/pkg/cmd/server/bootstrappolicy/policy.go index b02bf86bf3bc..cb9f8b45977c 100644 --- a/pkg/cmd/server/bootstrappolicy/policy.go +++ b/pkg/cmd/server/bootstrappolicy/policy.go @@ -693,6 +693,7 @@ func GetOpenshiftBootstrapClusterRoles() []rbac.ClusterRole { rbac.NewRule("get", "create", "delete").Groups(kapiGroup).Resources("pods").RuleOrDie(), // TODO: restrict to pods scheduled on the bound node once supported rbac.NewRule("update").Groups(kapiGroup).Resources("pods/status").RuleOrDie(), + rbac.NewRule("create").Groups(kapiGroup).Resources("pods/eviction").RuleOrDie(), // TODO: restrict to secrets and configmaps used by pods scheduled on bound node once supported // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs diff --git a/pkg/cmd/server/kubernetes/master/master_config_test.go b/pkg/cmd/server/kubernetes/master/master_config_test.go index 4f333a87d207..869739159bcd 100644 --- a/pkg/cmd/server/kubernetes/master/master_config_test.go +++ b/pkg/cmd/server/kubernetes/master/master_config_test.go @@ -75,6 +75,7 @@ func TestAPIServerDefaults(t *testing.T) { MaxRequestsInFlight: 400, MaxMutatingRequestsInFlight: 200, MinRequestTimeout: 1800, + RequestTimeout: time.Duration(60) * time.Second, }, Admission: &apiserveroptions.AdmissionOptions{ PluginNames: []string{"AlwaysAdmit"}, diff --git a/pkg/oauth/apiserver/oauth_apiserver.go 
b/pkg/oauth/apiserver/oauth_apiserver.go index b4eb893bf303..3d77a0a9fa64 100644 --- a/pkg/oauth/apiserver/oauth_apiserver.go +++ b/pkg/oauth/apiserver/oauth_apiserver.go @@ -200,7 +200,7 @@ func (c *OAuthServerConfig) buildHandlerChainForOAuth(startingHandler http.Handl handler = genericfilters.WithMaxInFlightLimit(handler, genericConfig.MaxRequestsInFlight, genericConfig.MaxMutatingRequestsInFlight, genericConfig.RequestContextMapper, genericConfig.LongRunningFunc) handler = genericfilters.WithCORS(handler, genericConfig.CorsAllowedOriginList, nil, nil, nil, "true") - handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, genericConfig.RequestContextMapper, genericConfig.LongRunningFunc) + handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, genericConfig.RequestContextMapper, genericConfig.LongRunningFunc, genericConfig.RequestTimeout) handler = genericapifilters.WithRequestInfo(handler, genericapiserver.NewRequestInfoResolver(genericConfig), genericConfig.RequestContextMapper) handler = apirequest.WithRequestContext(handler, genericConfig.RequestContextMapper) handler = genericfilters.WithPanicRecovery(handler) diff --git a/test/integration/node_authorizer_test.go b/test/integration/node_authorizer_test.go index 0caa3607d7a8..035b9c3a2777 100644 --- a/test/integration/node_authorizer_test.go +++ b/test/integration/node_authorizer_test.go @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/policy" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/openshift/origin/pkg/cmd/server/admin" @@ -22,6 +23,8 @@ import ( testserver "github.com/openshift/origin/test/util/server" ) +// If this test fails make sure to update it with contents from +// vendor/k8s.io/kubernetes/test/integration/auth/node_test.go#TestNodeAuthorizer func TestNodeAuthorizer(t *testing.T) { masterConfig, err := 
testserver.DefaultMasterOptions() if err != nil { @@ -188,6 +191,30 @@ func TestNodeAuthorizer(t *testing.T) { deleteNode2 := func(client clientset.Interface) error { return client.Core().Nodes().Delete("node2", nil) } + createNode2NormalPodEviction := func(client clientset.Interface) error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2normalpod", + Namespace: "ns", + }, + }) + } + createNode2MirrorPodEviction := func(client clientset.Interface) error { + return client.Policy().Evictions("ns").Evict(&policy.Eviction{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1beta1", + Kind: "Eviction", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "node2mirrorpod", + Namespace: "ns", + }, + }) + } // nodeanonClient := clientsetForToken(tokenNodeUnknown, clientConfig) // node1Client := clientsetForToken(tokenNode1, clientConfig) @@ -201,7 +228,9 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, getPV(nodeanonClient)) expectForbidden(t, createNode2NormalPod(nodeanonClient)) expectForbidden(t, createNode2MirrorPod(nodeanonClient)) + expectForbidden(t, deleteNode2NormalPod(nodeanonClient)) expectForbidden(t, deleteNode2MirrorPod(nodeanonClient)) + expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient)) expectForbidden(t, createNode2(nodeanonClient)) expectForbidden(t, updateNode2Status(nodeanonClient)) expectForbidden(t, deleteNode2(nodeanonClient)) @@ -213,7 +242,8 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, getPV(node1Client)) expectForbidden(t, createNode2NormalPod(nodeanonClient)) expectForbidden(t, createNode2MirrorPod(node1Client)) - expectForbidden(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, createNode2MirrorPodEviction(node1Client)) expectForbidden(t, createNode2(node1Client)) expectForbidden(t, 
updateNode2Status(node1Client)) expectForbidden(t, deleteNode2(node1Client)) @@ -228,6 +258,8 @@ func TestNodeAuthorizer(t *testing.T) { // mirror pod and self node lifecycle is allowed expectAllowed(t, createNode2MirrorPod(node2Client)) expectAllowed(t, deleteNode2MirrorPod(node2Client)) + expectAllowed(t, createNode2MirrorPod(node2Client)) + expectAllowed(t, createNode2MirrorPodEviction(node2Client)) expectAllowed(t, createNode2(node2Client)) expectAllowed(t, updateNode2Status(node2Client)) expectAllowed(t, deleteNode2(node2Client)) @@ -244,8 +276,10 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, createNode2NormalPod(nodeanonClient)) expectForbidden(t, updateNode2NormalPodStatus(nodeanonClient)) expectForbidden(t, deleteNode2NormalPod(nodeanonClient)) + expectForbidden(t, createNode2NormalPodEviction(nodeanonClient)) expectForbidden(t, createNode2MirrorPod(nodeanonClient)) expectForbidden(t, deleteNode2MirrorPod(nodeanonClient)) + expectForbidden(t, createNode2MirrorPodEviction(nodeanonClient)) expectForbidden(t, getSecret(node1Client)) expectForbidden(t, getPVSecret(node1Client)) @@ -255,8 +289,10 @@ func TestNodeAuthorizer(t *testing.T) { expectForbidden(t, createNode2NormalPod(node1Client)) expectForbidden(t, updateNode2NormalPodStatus(node1Client)) expectForbidden(t, deleteNode2NormalPod(node1Client)) + expectForbidden(t, createNode2NormalPodEviction(node1Client)) expectForbidden(t, createNode2MirrorPod(node1Client)) - expectForbidden(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, deleteNode2MirrorPod(node1Client)) + expectNotFound(t, createNode2MirrorPodEviction(node1Client)) // node2 can get referenced objects now expectAllowed(t, getSecret(node2Client)) @@ -269,6 +305,11 @@ func TestNodeAuthorizer(t *testing.T) { expectAllowed(t, deleteNode2NormalPod(node2Client)) expectAllowed(t, createNode2MirrorPod(node2Client)) expectAllowed(t, deleteNode2MirrorPod(node2Client)) + // recreate as an admin to test eviction + expectAllowed(t, 
createNode2NormalPod(superuserClient)) + expectAllowed(t, createNode2MirrorPod(superuserClient)) + expectAllowed(t, createNode2NormalPodEviction(node2Client)) + expectAllowed(t, createNode2MirrorPodEviction(node2Client)) } func makeNodeClientset(t *testing.T, signer *admin.SignerCertOptions, certDir string, username string, anonymousConfig *rest.Config) clientset.Interface { diff --git a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml index 549908684d6e..5ad53587eae2 100644 --- a/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_cluster_roles.yaml @@ -2261,6 +2261,12 @@ items: - pods/status verbs: - update + - apiGroups: + - "" + resources: + - pods/eviction + verbs: + - create - apiGroups: - "" resources: diff --git a/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml b/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml index eb040fc21cf7..1ff9c9a35466 100644 --- a/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml +++ b/test/testdata/bootstrappolicy/bootstrap_policy_file.yaml @@ -2473,6 +2473,13 @@ items: - pods/status verbs: - update + - apiGroups: + - "" + attributeRestrictions: null + resources: + - pods/eviction + verbs: + - create - apiGroups: - "" attributeRestrictions: null From 1f2d677019ebc968932bbb7d1c1fbf539d81c8b1 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 2 Oct 2017 12:54:27 +0200 Subject: [PATCH 26/27] UPSTREAM: : generated updates --- .../pkg/api/v1/zz_generated.deepcopy.go | 18 +----------------- .../pkg/api/zz_generated.deepcopy.go | 18 +----------------- .../v1beta1/zz_generated.deepcopy.go | 18 +----------------- .../authorization/zz_generated.deepcopy.go | 18 +----------------- .../autoscaling/v1/zz_generated.deepcopy.go | 18 +----------------- .../apis/autoscaling/zz_generated.deepcopy.go | 18 +----------------- .../pkg/apis/batch/v1/zz_generated.deepcopy.go | 18 +----------------- 
.../pkg/apis/batch/zz_generated.deepcopy.go | 18 +----------------- .../v1alpha1/zz_generated.deepcopy.go | 18 +----------------- .../componentconfig/zz_generated.deepcopy.go | 18 +----------------- .../v1beta1/zz_generated.deepcopy.go | 18 +----------------- .../apis/extensions/zz_generated.deepcopy.go | 18 +----------------- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 18 +----------------- .../pkg/runtime/zz_generated.deepcopy.go | 18 +----------------- .../v1beta1/zz_generated.deepcopy.go | 18 +----------------- .../apiregistration/zz_generated.deepcopy.go | 18 +----------------- 16 files changed, 16 insertions(+), 272 deletions(-) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go index 18cd4311e879..f90f8f0a299b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go index 0aa95e1071b0..402b693a4b59 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go index fe99f2844845..0648e3ae0bb8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go index 74c83c56a74a..72c417114211 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/authorization/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go index 94fdc46d0628..00cee6b45946 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go index a15d9b32ba89..55f485b6d0db 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go index a372c1eea987..87c6062c2d42 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go index 88f1a825bffd..8f7c692ef7fe 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 9b1527fae37e..cb49f9583b37 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go index c39fea4aae1c..1366b2f87dc8 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go index 00cf27c7b70e..73ed112a8ea9 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index 0efd6727a297..5c8256c2dc10 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 6fac96be4079..6c4a3305cb7c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 54ce6ad59e01..e38977b52d79 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go index 4b1049385563..850c42d580b3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go index 3388bcef4887..c15c11d9ce55 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go @@ -1,20 +1,4 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ +// +build !ignore_autogenerated_openshift // This file was autogenerated by deepcopy-gen. Do not edit it manually! From 911690d726d3606af862345924edd3481166a0d9 Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Mon, 2 Oct 2017 12:54:38 +0200 Subject: [PATCH 27/27] Generated updates --- api/docs/api/v1.Pod.adoc | 4 +- api/docs/api/v1.ReplicationController.adoc | 12 +- .../v1.DeploymentConfig.adoc | 12 +- api/docs/apis-apps/v1beta1.Deployment.adoc | 8 +- .../apis-build.openshift.io/v1.Build.adoc | 4 +- .../v1.BuildConfig.adoc | 8 +- .../apis-extensions/v1beta1.Deployment.adoc | 8 +- .../apis-extensions/v1beta1.ReplicaSet.adoc | 6 +- .../v1.ImageStream.adoc | 2 +- api/docs/oapi/v1.Build.adoc | 4 +- api/docs/oapi/v1.BuildConfig.adoc | 8 +- api/docs/oapi/v1.DeploymentConfig.adoc | 12 +- api/docs/oapi/v1.ImageStream.adoc | 2 +- ...rnetes_pkg_apis_autoscaling_v2alpha1.proto | 1 - api/swagger-spec/api-v1.json | 20 +- api/swagger-spec/oapi-v1.json | 52 ++--- api/swagger-spec/openshift-openapi-spec.json | 178 +++++++++--------- contrib/completions/bash/openshift | 4 + contrib/completions/zsh/openshift | 4 + 19 files changed, 178 insertions(+), 171 deletions(-) diff --git a/api/docs/api/v1.Pod.adoc b/api/docs/api/v1.Pod.adoc index f63073511fb4..20d866bd94f1 100644 --- a/api/docs/api/v1.Pod.adoc +++ b/api/docs/api/v1.Pod.adoc @@ -1671,7 +1671,7 @@ $ curl -k \ [[Post-api-v1-namespaces-namespace-pods-name-binding]] === Create binding of a Pod in a 
namespace -Create binding of a Binding +Create binding of a Pod ==== HTTP request ---- @@ -1749,7 +1749,7 @@ EOF [[Post-api-v1-namespaces-namespace-pods-name-eviction]] === Create eviction of a Pod in a namespace -Create eviction of an Eviction +Create eviction of a Pod ==== HTTP request ---- diff --git a/api/docs/api/v1.ReplicationController.adoc b/api/docs/api/v1.ReplicationController.adoc index de338b48d6ab..7a3dcf0d21ed 100644 --- a/api/docs/api/v1.ReplicationController.adoc +++ b/api/docs/api/v1.ReplicationController.adoc @@ -1559,7 +1559,7 @@ $ curl -k \ [[Get-api-v1-namespaces-namespace-replicationcontrollers-name-scale]] === Get scale of a ReplicationController in a namespace -Read scale of the specified Scale +Read scale of the specified ReplicationController ==== HTTP request ---- @@ -1613,7 +1613,7 @@ $ curl -k \ [[Get-apis-extensions-v1beta1-namespaces-namespace-replicationcontrollers-name-scale]] === Get scale of a ReplicationController in a namespace -Read scale of the specified Scale +Read scale of the specified ReplicationControllerDummy ==== HTTP request ---- @@ -1667,7 +1667,7 @@ $ curl -k \ [[Put-api-v1-namespaces-namespace-replicationcontrollers-name-scale]] === Update scale of a ReplicationController in a namespace -Replace scale of the specified Scale +Replace scale of the specified ReplicationController ==== HTTP request ---- @@ -1745,7 +1745,7 @@ EOF [[Put-apis-extensions-v1beta1-namespaces-namespace-replicationcontrollers-name-scale]] === Update scale of a ReplicationController in a namespace -Replace scale of the specified Scale +Replace scale of the specified ReplicationControllerDummy ==== HTTP request ---- @@ -1823,7 +1823,7 @@ EOF [[Patch-api-v1-namespaces-namespace-replicationcontrollers-name-scale]] === Patch scale of a ReplicationController in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified ReplicationController ==== HTTP request ---- @@ -1899,7 +1899,7 @@ EOF 
[[Patch-apis-extensions-v1beta1-namespaces-namespace-replicationcontrollers-name-scale]] === Patch scale of a ReplicationController in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified ReplicationControllerDummy ==== HTTP request ---- diff --git a/api/docs/apis-apps.openshift.io/v1.DeploymentConfig.adoc b/api/docs/apis-apps.openshift.io/v1.DeploymentConfig.adoc index 70cd25af5d88..a2649e7b608d 100644 --- a/api/docs/apis-apps.openshift.io/v1.DeploymentConfig.adoc +++ b/api/docs/apis-apps.openshift.io/v1.DeploymentConfig.adoc @@ -1831,7 +1831,7 @@ $ curl -k \ [[Post-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-instantiate]] === Create instantiate of a DeploymentConfig in a namespace -Create instantiate of a DeploymentRequest +Create instantiate of a DeploymentConfig ==== HTTP request ---- @@ -1909,7 +1909,7 @@ EOF [[Get-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-log]] === Get log of a DeploymentConfig in a namespace -Read log of the specified DeploymentLog +Read log of the specified DeploymentConfig ==== HTTP request ---- @@ -1972,7 +1972,7 @@ $ curl -k \ [[Post-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-rollback]] === Create rollback of a DeploymentConfig in a namespace -Create rollback of a DeploymentConfigRollback +Create rollback of a DeploymentConfig ==== HTTP request ---- @@ -2050,7 +2050,7 @@ EOF [[Get-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Get scale of a DeploymentConfig in a namespace -Read scale of the specified Scale +Read scale of the specified DeploymentConfig ==== HTTP request ---- @@ -2104,7 +2104,7 @@ $ curl -k \ [[Put-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Update scale of a DeploymentConfig in a namespace -Replace scale of the specified Scale +Replace scale of the specified DeploymentConfig ==== HTTP request ---- @@ -2182,7 +2182,7 @@ EOF 
[[Patch-apis-apps.openshift.io-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Patch scale of a DeploymentConfig in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified DeploymentConfig ==== HTTP request ---- diff --git a/api/docs/apis-apps/v1beta1.Deployment.adoc b/api/docs/apis-apps/v1beta1.Deployment.adoc index 11664cbe7eb6..f592a42f0704 100644 --- a/api/docs/apis-apps/v1beta1.Deployment.adoc +++ b/api/docs/apis-apps/v1beta1.Deployment.adoc @@ -1578,7 +1578,7 @@ $ curl -k \ [[Post-apis-apps-v1beta1-namespaces-namespace-deployments-name-rollback]] === Create rollback of a Deployment in a namespace -Create rollback of a DeploymentRollback +Create rollback of a Deployment ==== HTTP request ---- @@ -1656,7 +1656,7 @@ EOF [[Get-apis-apps-v1beta1-namespaces-namespace-deployments-name-scale]] === Get scale of a Deployment in a namespace -Read scale of the specified Scale +Read scale of the specified Deployment ==== HTTP request ---- @@ -1710,7 +1710,7 @@ $ curl -k \ [[Put-apis-apps-v1beta1-namespaces-namespace-deployments-name-scale]] === Update scale of a Deployment in a namespace -Replace scale of the specified Scale +Replace scale of the specified Deployment ==== HTTP request ---- @@ -1788,7 +1788,7 @@ EOF [[Patch-apis-apps-v1beta1-namespaces-namespace-deployments-name-scale]] === Patch scale of a Deployment in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified Deployment ==== HTTP request ---- diff --git a/api/docs/apis-build.openshift.io/v1.Build.adoc b/api/docs/apis-build.openshift.io/v1.Build.adoc index 0ca7382b8134..549f762504c3 100644 --- a/api/docs/apis-build.openshift.io/v1.Build.adoc +++ b/api/docs/apis-build.openshift.io/v1.Build.adoc @@ -1208,7 +1208,7 @@ $ curl -k \ [[Post-apis-build.openshift.io-v1-namespaces-namespace-builds-name-clone]] === Create clone of a Build in a namespace -Create clone of a BuildRequest +Create clone of a Build ==== HTTP 
request ---- @@ -1364,7 +1364,7 @@ EOF [[Get-apis-build.openshift.io-v1-namespaces-namespace-builds-name-log]] === Get log of a Build in a namespace -Read log of the specified BuildLog +Read log of the specified Build ==== HTTP request ---- diff --git a/api/docs/apis-build.openshift.io/v1.BuildConfig.adoc b/api/docs/apis-build.openshift.io/v1.BuildConfig.adoc index f446fb187d35..c084689a4621 100644 --- a/api/docs/apis-build.openshift.io/v1.BuildConfig.adoc +++ b/api/docs/apis-build.openshift.io/v1.BuildConfig.adoc @@ -1144,7 +1144,7 @@ $ curl -k \ [[Post-apis-build.openshift.io-v1-namespaces-namespace-buildconfigs-name-instantiate]] === Create instantiate of a BuildConfig in a namespace -Create instantiate of a BuildRequest +Create instantiate of a BuildConfig ==== HTTP request ---- @@ -1222,7 +1222,7 @@ EOF [[Post-apis-build.openshift.io-v1-namespaces-namespace-buildconfigs-name-instantiatebinary]] === Create instantiatebinary of a BuildConfig in a namespace -Connect POST requests to instantiatebinary of BinaryBuildRequestOptions +Connect POST requests to instantiatebinary of BuildConfig ==== HTTP request ---- @@ -1281,7 +1281,7 @@ $ curl -k \ [[Post-apis-build.openshift.io-v1-namespaces-namespace-buildconfigs-name-webhooks]] === Create webhooks of a BuildConfig in a namespace -Connect POST requests to webhooks of Build +Connect POST requests to webhooks of BuildConfig ==== HTTP request ---- @@ -1334,7 +1334,7 @@ $ curl -k \ [[Post-apis-build.openshift.io-v1-namespaces-namespace-buildconfigs-name-webhooks-path]] === Create webhooks/{path} of a BuildConfig in a namespace -Connect POST requests to webhooks of Build +Connect POST requests to webhooks of BuildConfig ==== HTTP request ---- diff --git a/api/docs/apis-extensions/v1beta1.Deployment.adoc b/api/docs/apis-extensions/v1beta1.Deployment.adoc index aef68ea356f0..3abaefe2aaa6 100644 --- a/api/docs/apis-extensions/v1beta1.Deployment.adoc +++ b/api/docs/apis-extensions/v1beta1.Deployment.adoc @@ -1578,7 +1578,7 @@ 
$ curl -k \ [[Post-apis-extensions-v1beta1-namespaces-namespace-deployments-name-rollback]] === Create rollback of a Deployment in a namespace -Create rollback of a DeploymentRollback +Create rollback of a Deployment ==== HTTP request ---- @@ -1656,7 +1656,7 @@ EOF [[Get-apis-extensions-v1beta1-namespaces-namespace-deployments-name-scale]] === Get scale of a Deployment in a namespace -Read scale of the specified Scale +Read scale of the specified Deployment ==== HTTP request ---- @@ -1710,7 +1710,7 @@ $ curl -k \ [[Put-apis-extensions-v1beta1-namespaces-namespace-deployments-name-scale]] === Update scale of a Deployment in a namespace -Replace scale of the specified Scale +Replace scale of the specified Deployment ==== HTTP request ---- @@ -1788,7 +1788,7 @@ EOF [[Patch-apis-extensions-v1beta1-namespaces-namespace-deployments-name-scale]] === Patch scale of a Deployment in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified Deployment ==== HTTP request ---- diff --git a/api/docs/apis-extensions/v1beta1.ReplicaSet.adoc b/api/docs/apis-extensions/v1beta1.ReplicaSet.adoc index 6b585b12aa04..00d1ff12e307 100644 --- a/api/docs/apis-extensions/v1beta1.ReplicaSet.adoc +++ b/api/docs/apis-extensions/v1beta1.ReplicaSet.adoc @@ -1565,7 +1565,7 @@ $ curl -k \ [[Get-apis-extensions-v1beta1-namespaces-namespace-replicasets-name-scale]] === Get scale of a ReplicaSet in a namespace -Read scale of the specified Scale +Read scale of the specified ReplicaSet ==== HTTP request ---- @@ -1619,7 +1619,7 @@ $ curl -k \ [[Put-apis-extensions-v1beta1-namespaces-namespace-replicasets-name-scale]] === Update scale of a ReplicaSet in a namespace -Replace scale of the specified Scale +Replace scale of the specified ReplicaSet ==== HTTP request ---- @@ -1697,7 +1697,7 @@ EOF [[Patch-apis-extensions-v1beta1-namespaces-namespace-replicasets-name-scale]] === Patch scale of a ReplicaSet in a namespace -Partially update scale of the specified Scale 
+Partially update scale of the specified ReplicaSet ==== HTTP request ---- diff --git a/api/docs/apis-image.openshift.io/v1.ImageStream.adoc b/api/docs/apis-image.openshift.io/v1.ImageStream.adoc index 5dca04d55752..1eb6405192f1 100644 --- a/api/docs/apis-image.openshift.io/v1.ImageStream.adoc +++ b/api/docs/apis-image.openshift.io/v1.ImageStream.adoc @@ -922,7 +922,7 @@ $ curl -k \ [[Get-apis-image.openshift.io-v1-namespaces-namespace-imagestreams-name-secrets]] === Get secrets of a ImageStream in a namespace -Read secrets of the specified SecretList +Read secrets of the specified ImageStream ==== HTTP request ---- diff --git a/api/docs/oapi/v1.Build.adoc b/api/docs/oapi/v1.Build.adoc index b605afe46284..28924cd094ff 100644 --- a/api/docs/oapi/v1.Build.adoc +++ b/api/docs/oapi/v1.Build.adoc @@ -1208,7 +1208,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-builds-name-clone]] === Create clone of a Build in a namespace -Create clone of a BuildRequest +Create clone of a Build ==== HTTP request ---- @@ -1364,7 +1364,7 @@ EOF [[Get-oapi-v1-namespaces-namespace-builds-name-log]] === Get log of a Build in a namespace -Read log of the specified BuildLog +Read log of the specified Build ==== HTTP request ---- diff --git a/api/docs/oapi/v1.BuildConfig.adoc b/api/docs/oapi/v1.BuildConfig.adoc index f4de859b359e..306fc8a97e9e 100644 --- a/api/docs/oapi/v1.BuildConfig.adoc +++ b/api/docs/oapi/v1.BuildConfig.adoc @@ -1144,7 +1144,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-buildconfigs-name-instantiate]] === Create instantiate of a BuildConfig in a namespace -Create instantiate of a BuildRequest +Create instantiate of a BuildConfig ==== HTTP request ---- @@ -1222,7 +1222,7 @@ EOF [[Post-oapi-v1-namespaces-namespace-buildconfigs-name-instantiatebinary]] === Create instantiatebinary of a BuildConfig in a namespace -Connect POST requests to instantiatebinary of BinaryBuildRequestOptions +Connect POST requests to instantiatebinary of BuildConfig ==== HTTP request ---- 
@@ -1281,7 +1281,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-buildconfigs-name-webhooks]] === Create webhooks of a BuildConfig in a namespace -Connect POST requests to webhooks of Build +Connect POST requests to webhooks of BuildConfig ==== HTTP request ---- @@ -1334,7 +1334,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-buildconfigs-name-webhooks-path]] === Create webhooks/{path} of a BuildConfig in a namespace -Connect POST requests to webhooks of Build +Connect POST requests to webhooks of BuildConfig ==== HTTP request ---- diff --git a/api/docs/oapi/v1.DeploymentConfig.adoc b/api/docs/oapi/v1.DeploymentConfig.adoc index b0e0bda75576..04c3d23edc8f 100644 --- a/api/docs/oapi/v1.DeploymentConfig.adoc +++ b/api/docs/oapi/v1.DeploymentConfig.adoc @@ -1831,7 +1831,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-deploymentconfigs-name-instantiate]] === Create instantiate of a DeploymentConfig in a namespace -Create instantiate of a DeploymentRequest +Create instantiate of a DeploymentConfig ==== HTTP request ---- @@ -1909,7 +1909,7 @@ EOF [[Get-oapi-v1-namespaces-namespace-deploymentconfigs-name-log]] === Get log of a DeploymentConfig in a namespace -Read log of the specified DeploymentLog +Read log of the specified DeploymentConfig ==== HTTP request ---- @@ -1972,7 +1972,7 @@ $ curl -k \ [[Post-oapi-v1-namespaces-namespace-deploymentconfigs-name-rollback]] === Create rollback of a DeploymentConfig in a namespace -Create rollback of a DeploymentConfigRollback +Create rollback of a DeploymentConfig ==== HTTP request ---- @@ -2050,7 +2050,7 @@ EOF [[Get-oapi-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Get scale of a DeploymentConfig in a namespace -Read scale of the specified Scale +Read scale of the specified DeploymentConfig ==== HTTP request ---- @@ -2104,7 +2104,7 @@ $ curl -k \ [[Put-oapi-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Update scale of a DeploymentConfig in a namespace -Replace scale of the specified 
Scale +Replace scale of the specified DeploymentConfig ==== HTTP request ---- @@ -2182,7 +2182,7 @@ EOF [[Patch-oapi-v1-namespaces-namespace-deploymentconfigs-name-scale]] === Patch scale of a DeploymentConfig in a namespace -Partially update scale of the specified Scale +Partially update scale of the specified DeploymentConfig ==== HTTP request ---- diff --git a/api/docs/oapi/v1.ImageStream.adoc b/api/docs/oapi/v1.ImageStream.adoc index fd043e7667ae..f8564381ac97 100644 --- a/api/docs/oapi/v1.ImageStream.adoc +++ b/api/docs/oapi/v1.ImageStream.adoc @@ -922,7 +922,7 @@ $ curl -k \ [[Get-oapi-v1-namespaces-namespace-imagestreams-name-secrets]] === Get secrets of a ImageStream in a namespace -Read secrets of the specified SecretList +Read secrets of the specified ImageStream ==== HTTP request ---- diff --git a/api/protobuf-spec/k8s_io_kubernetes_pkg_apis_autoscaling_v2alpha1.proto b/api/protobuf-spec/k8s_io_kubernetes_pkg_apis_autoscaling_v2alpha1.proto index d51547584f1e..c2223a1188c2 100644 --- a/api/protobuf-spec/k8s_io_kubernetes_pkg_apis_autoscaling_v2alpha1.proto +++ b/api/protobuf-spec/k8s_io_kubernetes_pkg_apis_autoscaling_v2alpha1.proto @@ -27,7 +27,6 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; // Package-wide variables from generator "generated". 
option go_package = "v2alpha1"; diff --git a/api/swagger-spec/api-v1.json b/api/swagger-spec/api-v1.json index 3160b2755b26..8e1b1530f2fa 100644 --- a/api/swagger-spec/api-v1.json +++ b/api/swagger-spec/api-v1.json @@ -9550,8 +9550,8 @@ { "type": "v1.Binding", "method": "POST", - "summary": "create binding of a Binding", - "nickname": "createNamespacedBindingBinding", + "summary": "create binding of a Pod", + "nickname": "createNamespacedPodBinding", "parameters": [ { "type": "string", @@ -9611,8 +9611,8 @@ { "type": "v1beta1.Eviction", "method": "POST", - "summary": "create eviction of an Eviction", - "nickname": "createNamespacedEvictionEviction", + "summary": "create eviction of a Pod", + "nickname": "createNamespacedPodEviction", "parameters": [ { "type": "string", @@ -12591,8 +12591,8 @@ { "type": "v1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified ReplicationController", + "nickname": "readNamespacedReplicationControllerScale", "parameters": [ { "type": "string", @@ -12638,8 +12638,8 @@ { "type": "v1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified ReplicationController", + "nickname": "replaceNamespacedReplicationControllerScale", "parameters": [ { "type": "string", @@ -12693,8 +12693,8 @@ { "type": "v1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified ReplicationController", + "nickname": "patchNamespacedReplicationControllerScale", "parameters": [ { "type": "string", diff --git a/api/swagger-spec/oapi-v1.json b/api/swagger-spec/oapi-v1.json index 75c7e2c559e7..b715c85d5df9 100644 --- a/api/swagger-spec/oapi-v1.json +++ b/api/swagger-spec/oapi-v1.json @@ -1150,8 +1150,8 @@ { "type": "v1.Build", 
"method": "POST", - "summary": "create instantiate of a BuildRequest", - "nickname": "createNamespacedBuildRequestInstantiate", + "summary": "create instantiate of a BuildConfig", + "nickname": "createNamespacedBuildConfigInstantiate", "parameters": [ { "type": "string", @@ -1211,8 +1211,8 @@ { "type": "v1.Build", "method": "POST", - "summary": "connect POST requests to instantiatebinary of BinaryBuildRequestOptions", - "nickname": "connectPostNamespacedBinaryBuildRequestOptionsInstantiatebinary", + "summary": "connect POST requests to instantiatebinary of BuildConfig", + "nickname": "connectPostNamespacedBuildConfigInstantiatebinary", "parameters": [ { "type": "string", @@ -1303,8 +1303,8 @@ { "type": "string", "method": "POST", - "summary": "connect POST requests to webhooks of Build", - "nickname": "connectPostNamespacedBuildWebhooks", + "summary": "connect POST requests to webhooks of BuildConfig", + "nickname": "connectPostNamespacedBuildConfigWebhooks", "parameters": [ { "type": "string", @@ -1347,8 +1347,8 @@ { "type": "string", "method": "POST", - "summary": "connect POST requests to webhooks of Build", - "nickname": "connectPostNamespacedBuildWebhooksWithPath", + "summary": "connect POST requests to webhooks of BuildConfig", + "nickname": "connectPostNamespacedBuildConfigWebhooksWithPath", "parameters": [ { "type": "string", @@ -2299,8 +2299,8 @@ { "type": "v1.BuildRequest", "method": "POST", - "summary": "create clone of a BuildRequest", - "nickname": "createNamespacedBuildRequestClone", + "summary": "create clone of a Build", + "nickname": "createNamespacedBuildClone", "parameters": [ { "type": "string", @@ -2421,8 +2421,8 @@ { "type": "v1.BuildLog", "method": "GET", - "summary": "read log of the specified BuildLog", - "nickname": "readNamespacedBuildLogLog", + "summary": "read log of the specified Build", + "nickname": "readNamespacedBuildLog", "parameters": [ { "type": "string", @@ -5591,8 +5591,8 @@ { "type": "v1.DeploymentRequest", "method": "POST", 
- "summary": "create instantiate of a DeploymentRequest", - "nickname": "createNamespacedDeploymentRequestInstantiate", + "summary": "create instantiate of a DeploymentConfig", + "nickname": "createNamespacedDeploymentConfigInstantiate", "parameters": [ { "type": "string", @@ -5652,8 +5652,8 @@ { "type": "v1.DeploymentLog", "method": "GET", - "summary": "read log of the specified DeploymentLog", - "nickname": "readNamespacedDeploymentLogLog", + "summary": "read log of the specified DeploymentConfig", + "nickname": "readNamespacedDeploymentConfigLog", "parameters": [ { "type": "string", @@ -5777,8 +5777,8 @@ { "type": "v1.DeploymentConfigRollback", "method": "POST", - "summary": "create rollback of a DeploymentConfigRollback", - "nickname": "createNamespacedDeploymentConfigRollbackRollback", + "summary": "create rollback of a DeploymentConfig", + "nickname": "createNamespacedDeploymentConfigRollback", "parameters": [ { "type": "string", @@ -5838,8 +5838,8 @@ { "type": "v1beta1.Scale", "method": "GET", - "summary": "read scale of the specified Scale", - "nickname": "readNamespacedScaleScale", + "summary": "read scale of the specified DeploymentConfig", + "nickname": "readNamespacedDeploymentConfigScale", "parameters": [ { "type": "string", @@ -5885,8 +5885,8 @@ { "type": "v1beta1.Scale", "method": "PUT", - "summary": "replace scale of the specified Scale", - "nickname": "replaceNamespacedScaleScale", + "summary": "replace scale of the specified DeploymentConfig", + "nickname": "replaceNamespacedDeploymentConfigScale", "parameters": [ { "type": "string", @@ -5940,8 +5940,8 @@ { "type": "v1beta1.Scale", "method": "PATCH", - "summary": "partially update scale of the specified Scale", - "nickname": "patchNamespacedScaleScale", + "summary": "partially update scale of the specified DeploymentConfig", + "nickname": "patchNamespacedDeploymentConfigScale", "parameters": [ { "type": "string", @@ -10767,8 +10767,8 @@ { "type": "v1.SecretList", "method": "GET", - "summary": 
"read secrets of the specified SecretList", - "nickname": "readNamespacedSecretListSecrets", + "summary": "read secrets of the specified ImageStream", + "nickname": "readNamespacedImageStreamSecrets", "parameters": [ { "type": "string", diff --git a/api/swagger-spec/openshift-openapi-spec.json b/api/swagger-spec/openshift-openapi-spec.json index 778f5500da62..cab55d9a3550 100644 --- a/api/swagger-spec/openshift-openapi-spec.json +++ b/api/swagger-spec/openshift-openapi-spec.json @@ -4062,7 +4062,7 @@ }, "/api/v1/namespaces/{namespace}/pods/{name}/binding": { "post": { - "description": "create binding of a Binding", + "description": "create binding of a Pod", "consumes": [ "*/*" ], @@ -4077,7 +4077,7 @@ "tags": [ "core_v1" ], - "operationId": "createCoreV1NamespacedBindingBinding", + "operationId": "createCoreV1NamespacedPodBinding", "parameters": [ { "name": "body", @@ -4134,7 +4134,7 @@ }, "/api/v1/namespaces/{namespace}/pods/{name}/eviction": { "post": { - "description": "create eviction of an Eviction", + "description": "create eviction of a Pod", "consumes": [ "*/*" ], @@ -4149,7 +4149,7 @@ "tags": [ "core_v1" ], - "operationId": "createCoreV1NamespacedEvictionEviction", + "operationId": "createCoreV1NamespacedPodEviction", "parameters": [ { "name": "body", @@ -6137,7 +6137,7 @@ }, "/api/v1/namespaces/{namespace}/replicationcontrollers/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified ReplicationController", "consumes": [ "*/*" ], @@ -6152,7 +6152,7 @@ "tags": [ "core_v1" ], - "operationId": "readCoreV1NamespacedScaleScale", + "operationId": "readCoreV1NamespacedReplicationControllerScale", "responses": { "200": { "description": "OK", @@ -6172,7 +6172,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified ReplicationController", "consumes": [ "*/*" ], @@ -6187,7 +6187,7 @@ "tags": [ "core_v1" ], - "operationId": 
"replaceCoreV1NamespacedScaleScale", + "operationId": "replaceCoreV1NamespacedReplicationControllerScale", "parameters": [ { "name": "body", @@ -6217,7 +6217,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified ReplicationController", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -6234,7 +6234,7 @@ "tags": [ "core_v1" ], - "operationId": "patchCoreV1NamespacedScaleScale", + "operationId": "patchCoreV1NamespacedReplicationControllerScale", "parameters": [ { "name": "body", @@ -19163,7 +19163,7 @@ }, "/apis/apps.openshift.io/v1/namespaces/{namespace}/deploymentconfigs/{name}/instantiate": { "post": { - "description": "create instantiate of a DeploymentRequest", + "description": "create instantiate of a DeploymentConfig", "consumes": [ "*/*" ], @@ -19178,7 +19178,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": "createAppsOpenshiftIoV1NamespacedDeploymentRequestInstantiate", + "operationId": "createAppsOpenshiftIoV1NamespacedDeploymentConfigInstantiate", "parameters": [ { "name": "body", @@ -19235,7 +19235,7 @@ }, "/apis/apps.openshift.io/v1/namespaces/{namespace}/deploymentconfigs/{name}/log": { "get": { - "description": "read log of the specified DeploymentLog", + "description": "read log of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -19250,7 +19250,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": "readAppsOpenshiftIoV1NamespacedDeploymentLogLog", + "operationId": "readAppsOpenshiftIoV1NamespacedDeploymentConfigLog", "responses": { "200": { "description": "OK", @@ -19360,7 +19360,7 @@ }, "/apis/apps.openshift.io/v1/namespaces/{namespace}/deploymentconfigs/{name}/rollback": { "post": { - "description": "create rollback of a DeploymentConfigRollback", + "description": "create rollback of a DeploymentConfig", "consumes": [ "*/*" ], @@ -19375,7 +19375,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": 
"createAppsOpenshiftIoV1NamespacedDeploymentConfigRollbackRollback", + "operationId": "createAppsOpenshiftIoV1NamespacedDeploymentConfigRollback", "parameters": [ { "name": "body", @@ -19432,7 +19432,7 @@ }, "/apis/apps.openshift.io/v1/namespaces/{namespace}/deploymentconfigs/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -19447,7 +19447,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": "readAppsOpenshiftIoV1NamespacedScaleScale", + "operationId": "readAppsOpenshiftIoV1NamespacedDeploymentConfigScale", "responses": { "200": { "description": "OK", @@ -19467,7 +19467,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -19482,7 +19482,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": "replaceAppsOpenshiftIoV1NamespacedScaleScale", + "operationId": "replaceAppsOpenshiftIoV1NamespacedDeploymentConfigScale", "parameters": [ { "name": "body", @@ -19512,7 +19512,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified DeploymentConfig", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -19529,7 +19529,7 @@ "tags": [ "appsOpenshiftIo_v1" ], - "operationId": "patchAppsOpenshiftIoV1NamespacedScaleScale", + "operationId": "patchAppsOpenshiftIoV1NamespacedDeploymentConfigScale", "parameters": [ { "name": "body", @@ -21294,7 +21294,7 @@ }, "/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/rollback": { "post": { - "description": "create rollback of a DeploymentRollback", + "description": "create rollback of a Deployment", "consumes": [ "*/*" ], @@ -21309,7 +21309,7 @@ "tags": [ "apps_v1beta1" ], - "operationId": "createAppsV1beta1NamespacedDeploymentRollbackRollback", + "operationId": 
"createAppsV1beta1NamespacedDeploymentRollback", "parameters": [ { "name": "body", @@ -21366,7 +21366,7 @@ }, "/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified Deployment", "consumes": [ "*/*" ], @@ -21381,7 +21381,7 @@ "tags": [ "apps_v1beta1" ], - "operationId": "readAppsV1beta1NamespacedScaleScale", + "operationId": "readAppsV1beta1NamespacedDeploymentScale", "responses": { "200": { "description": "OK", @@ -21401,7 +21401,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified Deployment", "consumes": [ "*/*" ], @@ -21416,7 +21416,7 @@ "tags": [ "apps_v1beta1" ], - "operationId": "replaceAppsV1beta1NamespacedScaleScale", + "operationId": "replaceAppsV1beta1NamespacedDeploymentScale", "parameters": [ { "name": "body", @@ -21446,7 +21446,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified Deployment", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -21463,7 +21463,7 @@ "tags": [ "apps_v1beta1" ], - "operationId": "patchAppsV1beta1NamespacedScaleScale", + "operationId": "patchAppsV1beta1NamespacedDeploymentScale", "parameters": [ { "name": "body", @@ -32477,7 +32477,7 @@ }, "/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate": { "post": { - "description": "create instantiate of a BuildRequest", + "description": "create instantiate of a BuildConfig", "consumes": [ "*/*" ], @@ -32492,7 +32492,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": "createBuildOpenshiftIoV1NamespacedBuildRequestInstantiate", + "operationId": "createBuildOpenshiftIoV1NamespacedBuildConfigInstantiate", "parameters": [ { "name": "body", @@ -32549,7 +32549,7 @@ }, 
"/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/instantiatebinary": { "post": { - "description": "connect POST requests to instantiatebinary of BinaryBuildRequestOptions", + "description": "connect POST requests to instantiatebinary of BuildConfig", "consumes": [ "*/*" ], @@ -32562,7 +32562,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": "connectBuildOpenshiftIoV1PostNamespacedBinaryBuildRequestOptionsInstantiatebinary", + "operationId": "connectBuildOpenshiftIoV1PostNamespacedBuildConfigInstantiatebinary", "responses": { "200": { "description": "OK", @@ -32651,7 +32651,7 @@ }, "/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks": { "post": { - "description": "connect POST requests to webhooks of Build", + "description": "connect POST requests to webhooks of BuildConfig", "consumes": [ "*/*" ], @@ -32664,7 +32664,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": "connectBuildOpenshiftIoV1PostNamespacedBuildWebhooks", + "operationId": "connectBuildOpenshiftIoV1PostNamespacedBuildConfigWebhooks", "responses": { "200": { "description": "OK", @@ -32711,7 +32711,7 @@ }, "/apis/build.openshift.io/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks/{path}": { "post": { - "description": "connect POST requests to webhooks of Build", + "description": "connect POST requests to webhooks of BuildConfig", "consumes": [ "*/*" ], @@ -32724,7 +32724,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": "connectBuildOpenshiftIoV1PostNamespacedBuildWebhooksWithPath", + "operationId": "connectBuildOpenshiftIoV1PostNamespacedBuildConfigWebhooksWithPath", "responses": { "200": { "description": "OK", @@ -33239,7 +33239,7 @@ }, "/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/clone": { "post": { - "description": "create clone of a BuildRequest", + "description": "create clone of a Build", "consumes": [ "*/*" ], @@ -33254,7 +33254,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": 
"createBuildOpenshiftIoV1NamespacedBuildRequestClone", + "operationId": "createBuildOpenshiftIoV1NamespacedBuildClone", "parameters": [ { "name": "body", @@ -33383,7 +33383,7 @@ }, "/apis/build.openshift.io/v1/namespaces/{namespace}/builds/{name}/log": { "get": { - "description": "read log of the specified BuildLog", + "description": "read log of the specified Build", "consumes": [ "*/*" ], @@ -33398,7 +33398,7 @@ "tags": [ "buildOpenshiftIo_v1" ], - "operationId": "readBuildOpenshiftIoV1NamespacedBuildLogLog", + "operationId": "readBuildOpenshiftIoV1NamespacedBuildLog", "responses": { "200": { "description": "OK", @@ -36473,7 +36473,7 @@ }, "/apis/extensions/v1beta1/namespaces/{namespace}/deployments/{name}/rollback": { "post": { - "description": "create rollback of a DeploymentRollback", + "description": "create rollback of a Deployment", "consumes": [ "*/*" ], @@ -36488,7 +36488,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "createExtensionsV1beta1NamespacedDeploymentRollbackRollback", + "operationId": "createExtensionsV1beta1NamespacedDeploymentRollback", "parameters": [ { "name": "body", @@ -36545,7 +36545,7 @@ }, "/apis/extensions/v1beta1/namespaces/{namespace}/deployments/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified Deployment", "consumes": [ "*/*" ], @@ -36560,7 +36560,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "readExtensionsV1beta1NamespacedDeploymentsScale", + "operationId": "readExtensionsV1beta1NamespacedDeploymentScale", "responses": { "200": { "description": "OK", @@ -36580,7 +36580,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified Deployment", "consumes": [ "*/*" ], @@ -36595,7 +36595,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "replaceExtensionsV1beta1NamespacedDeploymentsScale", + "operationId": "replaceExtensionsV1beta1NamespacedDeploymentScale", 
"parameters": [ { "name": "body", @@ -36625,7 +36625,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified Deployment", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -36642,7 +36642,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "patchExtensionsV1beta1NamespacedDeploymentsScale", + "operationId": "patchExtensionsV1beta1NamespacedDeploymentScale", "parameters": [ { "name": "body", @@ -38387,7 +38387,7 @@ }, "/apis/extensions/v1beta1/namespaces/{namespace}/replicasets/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified ReplicaSet", "consumes": [ "*/*" ], @@ -38402,7 +38402,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "readExtensionsV1beta1NamespacedReplicasetsScale", + "operationId": "readExtensionsV1beta1NamespacedReplicaSetScale", "responses": { "200": { "description": "OK", @@ -38422,7 +38422,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified ReplicaSet", "consumes": [ "*/*" ], @@ -38437,7 +38437,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "replaceExtensionsV1beta1NamespacedReplicasetsScale", + "operationId": "replaceExtensionsV1beta1NamespacedReplicaSetScale", "parameters": [ { "name": "body", @@ -38467,7 +38467,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified ReplicaSet", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -38484,7 +38484,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "patchExtensionsV1beta1NamespacedReplicasetsScale", + "operationId": "patchExtensionsV1beta1NamespacedReplicaSetScale", "parameters": [ { "name": "body", @@ -38695,7 +38695,7 @@ }, 
"/apis/extensions/v1beta1/namespaces/{namespace}/replicationcontrollers/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified ReplicationControllerDummy", "consumes": [ "*/*" ], @@ -38710,7 +38710,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "readExtensionsV1beta1NamespacedReplicationcontrollersScale", + "operationId": "readExtensionsV1beta1NamespacedReplicationControllerDummyScale", "responses": { "200": { "description": "OK", @@ -38730,7 +38730,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified ReplicationControllerDummy", "consumes": [ "*/*" ], @@ -38745,7 +38745,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "replaceExtensionsV1beta1NamespacedReplicationcontrollersScale", + "operationId": "replaceExtensionsV1beta1NamespacedReplicationControllerDummyScale", "parameters": [ { "name": "body", @@ -38775,7 +38775,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified ReplicationControllerDummy", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -38792,7 +38792,7 @@ "tags": [ "extensions_v1beta1" ], - "operationId": "patchExtensionsV1beta1NamespacedReplicationcontrollersScale", + "operationId": "patchExtensionsV1beta1NamespacedReplicationControllerDummyScale", "parameters": [ { "name": "body", @@ -43513,7 +43513,7 @@ }, "/apis/image.openshift.io/v1/namespaces/{namespace}/imagestreams/{name}/secrets": { "get": { - "description": "read secrets of the specified SecretList", + "description": "read secrets of the specified ImageStream", "consumes": [ "*/*" ], @@ -43528,7 +43528,7 @@ "tags": [ "imageOpenshiftIo_v1" ], - "operationId": "readImageOpenshiftIoV1NamespacedSecretListSecrets", + "operationId": "readImageOpenshiftIoV1NamespacedImageStreamSecrets", "responses": { "200": { 
"description": "OK", @@ -70928,7 +70928,7 @@ }, "/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/instantiate": { "post": { - "description": "create instantiate of a BuildRequest", + "description": "create instantiate of a BuildConfig", "consumes": [ "*/*" ], @@ -70943,7 +70943,7 @@ "tags": [ "oapi" ], - "operationId": "createNamespacedBuildRequestInstantiate", + "operationId": "createNamespacedBuildConfigInstantiate", "parameters": [ { "name": "body", @@ -71000,7 +71000,7 @@ }, "/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/instantiatebinary": { "post": { - "description": "connect POST requests to instantiatebinary of BinaryBuildRequestOptions", + "description": "connect POST requests to instantiatebinary of BuildConfig", "consumes": [ "*/*" ], @@ -71013,7 +71013,7 @@ "tags": [ "oapi" ], - "operationId": "connectPostNamespacedBinaryBuildRequestOptionsInstantiatebinary", + "operationId": "connectPostNamespacedBuildConfigInstantiatebinary", "responses": { "200": { "description": "OK", @@ -71102,7 +71102,7 @@ }, "/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks": { "post": { - "description": "connect POST requests to webhooks of Build", + "description": "connect POST requests to webhooks of BuildConfig", "consumes": [ "*/*" ], @@ -71115,7 +71115,7 @@ "tags": [ "oapi" ], - "operationId": "connectPostNamespacedBuildWebhooks", + "operationId": "connectPostNamespacedBuildConfigWebhooks", "responses": { "200": { "description": "OK", @@ -71162,7 +71162,7 @@ }, "/oapi/v1/namespaces/{namespace}/buildconfigs/{name}/webhooks/{path}": { "post": { - "description": "connect POST requests to webhooks of Build", + "description": "connect POST requests to webhooks of BuildConfig", "consumes": [ "*/*" ], @@ -71175,7 +71175,7 @@ "tags": [ "oapi" ], - "operationId": "connectPostNamespacedBuildWebhooksWithPath", + "operationId": "connectPostNamespacedBuildConfigWebhooksWithPath", "responses": { "200": { "description": "OK", @@ -71690,7 +71690,7 @@ }, 
"/oapi/v1/namespaces/{namespace}/builds/{name}/clone": { "post": { - "description": "create clone of a BuildRequest", + "description": "create clone of a Build", "consumes": [ "*/*" ], @@ -71705,7 +71705,7 @@ "tags": [ "oapi" ], - "operationId": "createNamespacedBuildRequestClone", + "operationId": "createNamespacedBuildClone", "parameters": [ { "name": "body", @@ -71834,7 +71834,7 @@ }, "/oapi/v1/namespaces/{namespace}/builds/{name}/log": { "get": { - "description": "read log of the specified BuildLog", + "description": "read log of the specified Build", "consumes": [ "*/*" ], @@ -71849,7 +71849,7 @@ "tags": [ "oapi" ], - "operationId": "readNamespacedBuildLogLog", + "operationId": "readNamespacedBuildLog", "responses": { "200": { "description": "OK", @@ -72483,7 +72483,7 @@ }, "/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/instantiate": { "post": { - "description": "create instantiate of a DeploymentRequest", + "description": "create instantiate of a DeploymentConfig", "consumes": [ "*/*" ], @@ -72498,7 +72498,7 @@ "tags": [ "oapi" ], - "operationId": "createNamespacedDeploymentRequestInstantiate", + "operationId": "createNamespacedDeploymentConfigInstantiate", "parameters": [ { "name": "body", @@ -72555,7 +72555,7 @@ }, "/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/log": { "get": { - "description": "read log of the specified DeploymentLog", + "description": "read log of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -72570,7 +72570,7 @@ "tags": [ "oapi" ], - "operationId": "readNamespacedDeploymentLogLog", + "operationId": "readNamespacedDeploymentConfigLog", "responses": { "200": { "description": "OK", @@ -72680,7 +72680,7 @@ }, "/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/rollback": { "post": { - "description": "create rollback of a DeploymentConfigRollback", + "description": "create rollback of a DeploymentConfig", "consumes": [ "*/*" ], @@ -72752,7 +72752,7 @@ }, 
"/oapi/v1/namespaces/{namespace}/deploymentconfigs/{name}/scale": { "get": { - "description": "read scale of the specified Scale", + "description": "read scale of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -72767,7 +72767,7 @@ "tags": [ "oapi" ], - "operationId": "readNamespacedScaleScale", + "operationId": "readNamespacedDeploymentConfigScale", "responses": { "200": { "description": "OK", @@ -72787,7 +72787,7 @@ } }, "put": { - "description": "replace scale of the specified Scale", + "description": "replace scale of the specified DeploymentConfig", "consumes": [ "*/*" ], @@ -72802,7 +72802,7 @@ "tags": [ "oapi" ], - "operationId": "replaceNamespacedScaleScale", + "operationId": "replaceNamespacedDeploymentConfigScale", "parameters": [ { "name": "body", @@ -72832,7 +72832,7 @@ } }, "patch": { - "description": "partially update scale of the specified Scale", + "description": "partially update scale of the specified DeploymentConfig", "consumes": [ "application/json-patch+json", "application/merge-patch+json", @@ -72849,7 +72849,7 @@ "tags": [ "oapi" ], - "operationId": "patchNamespacedScaleScale", + "operationId": "patchNamespacedDeploymentConfigScale", "parameters": [ { "name": "body", @@ -74170,7 +74170,7 @@ }, "/oapi/v1/namespaces/{namespace}/imagestreams/{name}/secrets": { "get": { - "description": "read secrets of the specified SecretList", + "description": "read secrets of the specified ImageStream", "consumes": [ "*/*" ], @@ -74185,7 +74185,7 @@ "tags": [ "oapi" ], - "operationId": "readNamespacedSecretListSecrets", + "operationId": "readNamespacedImageStreamSecrets", "responses": { "200": { "description": "OK", diff --git a/contrib/completions/bash/openshift b/contrib/completions/bash/openshift index cb947f9499e5..5c6f3752f03b 100644 --- a/contrib/completions/bash/openshift +++ b/contrib/completions/bash/openshift @@ -32896,6 +32896,8 @@ _openshift_start_kubernetes_apiserver() local_nonpersistent_flags+=("--proxy-client-key-file=") 
flags+=("--repair-malformed-updates") local_nonpersistent_flags+=("--repair-malformed-updates") + flags+=("--request-timeout=") + local_nonpersistent_flags+=("--request-timeout=") flags+=("--requestheader-allowed-names=") local_nonpersistent_flags+=("--requestheader-allowed-names=") flags+=("--requestheader-client-ca-file=") @@ -33178,6 +33180,8 @@ _openshift_start_kubernetes_kube-proxy() local_nonpersistent_flags+=("--masquerade-all") flags+=("--master=") local_nonpersistent_flags+=("--master=") + flags+=("--metrics-bind-address=") + local_nonpersistent_flags+=("--metrics-bind-address=") flags+=("--oom-score-adj=") local_nonpersistent_flags+=("--oom-score-adj=") flags+=("--profiling") diff --git a/contrib/completions/zsh/openshift b/contrib/completions/zsh/openshift index a9158fc5db9b..f6293cea1066 100644 --- a/contrib/completions/zsh/openshift +++ b/contrib/completions/zsh/openshift @@ -33045,6 +33045,8 @@ _openshift_start_kubernetes_apiserver() local_nonpersistent_flags+=("--proxy-client-key-file=") flags+=("--repair-malformed-updates") local_nonpersistent_flags+=("--repair-malformed-updates") + flags+=("--request-timeout=") + local_nonpersistent_flags+=("--request-timeout=") flags+=("--requestheader-allowed-names=") local_nonpersistent_flags+=("--requestheader-allowed-names=") flags+=("--requestheader-client-ca-file=") @@ -33327,6 +33329,8 @@ _openshift_start_kubernetes_kube-proxy() local_nonpersistent_flags+=("--masquerade-all") flags+=("--master=") local_nonpersistent_flags+=("--master=") + flags+=("--metrics-bind-address=") + local_nonpersistent_flags+=("--metrics-bind-address=") flags+=("--oom-score-adj=") local_nonpersistent_flags+=("--oom-score-adj=") flags+=("--profiling")