From cb98002753a8207adbafed774292730a173b9cb3 Mon Sep 17 00:00:00 2001
From: shahriar
Date: Wed, 3 Jan 2018 11:32:39 +0600
Subject: [PATCH 1/6] Fix operator image name

---
 pkg/cmds/init.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/cmds/init.go b/pkg/cmds/init.go
index 2b54fd10e..54c0104bd 100644
--- a/pkg/cmds/init.go
+++ b/pkg/cmds/init.go
@@ -63,7 +63,7 @@ var operatorLabel = map[string]string{
 }
 
 const (
-	imageOperator     = "kubedb/operator"
+	imageOperator     = "operator"
 	operatorName      = "kubedb-operator"
 	operatorContainer = "operator"
 	operatorPortName  = "web"

From 03e5efa35a7c0edda6f6b8e86555f035bd9ea750 Mon Sep 17 00:00:00 2001
From: shahriar
Date: Mon, 29 Jan 2018 19:09:11 +0600
Subject: [PATCH 2/6] update PolicyRule

---
 pkg/cmds/role.go | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/pkg/cmds/role.go b/pkg/cmds/role.go
index 748b0fd43..9deee48d7 100644
--- a/pkg/cmds/role.go
+++ b/pkg/cmds/role.go
@@ -8,7 +8,6 @@ import (
 	apps "k8s.io/api/apps/v1beta1"
 	batch "k8s.io/api/batch/v1"
 	core "k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	rbac "k8s.io/api/rbac/v1beta1"
 	storage "k8s.io/api/storage/v1"
 	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
@@ -25,11 +24,6 @@ var policyRuleOperator = []rbac.PolicyRule{
 		Resources: []string{"customresourcedefinitions"},
 		Verbs:     []string{"create", "delete", "get", "list"},
 	},
-	{
-		APIGroups: []string{extensions.GroupName},
-		Resources: []string{"thirdpartyresources"},
-		Verbs:     []string{"create", "delete", "get", "list"},
-	},
 	{
 		APIGroups: []string{rbac.GroupName},
 		Resources: []string{"rolebindings", "roles"},
@@ -42,7 +36,12 @@ var policyRuleOperator = []rbac.PolicyRule{
 	},
 	{
 		APIGroups: []string{core.GroupName},
-		Resources: []string{"secrets", "serviceaccounts"},
+		Resources: []string{"secrets"},
+		Verbs:     []string{"create", "delete", "get", "patch"},
+	},
+	{
+		APIGroups: []string{core.GroupName},
+		Resources: []string{"serviceaccounts"},
 		Verbs:     []string{"create", "delete", "get"},
 	},
 	{
@@ -53,7 +52,7 @@ var policyRuleOperator = []rbac.PolicyRule{
 	{
 		APIGroups: []string{batch.GroupName},
 		Resources: []string{"jobs"},
-		Verbs:     []string{"create", "delete", "get"},
+		Verbs:     []string{"create", "delete", "get", "list"},
 	},
 	{
 		APIGroups: []string{storage.GroupName},
@@ -68,7 +67,7 @@ var policyRuleOperator = []rbac.PolicyRule{
 	{
 		APIGroups: []string{core.GroupName},
 		Resources: []string{"persistentvolumeclaims"},
-		Verbs:     []string{"delete", "get", "list", "watch"},
+		Verbs:     []string{"delete", "get", "list", "patch", "watch"},
 	},
 	{
 		APIGroups: []string{core.GroupName},

From ee80640579732e9d63df4adec93e891d68e44a89 Mon Sep 17 00:00:00 2001
From: shahriar
Date: Tue, 30 Jan 2018 11:45:29 +0600
Subject: [PATCH 3/6] Update RBAC for Job watcher

---
 glide.lock | 24 +-
 pkg/cmds/init.go | 18 +-
 pkg/cmds/role.go | 123 ++++----
 .../appscode/kutil/core/v1/configmap.go | 78 +++++
 .../appscode/kutil/core/v1/kubernetes.go | 271 ++++++++++++++++++
 .../github.com/appscode/kutil/core/v1/node.go | 96 +++++++
 .../github.com/appscode/kutil/core/v1/pod.go | 163 +++++++++++
 .../github.com/appscode/kutil/core/v1/pv.go | 78 +++++
 .../github.com/appscode/kutil/core/v1/pvc.go | 78 +++++
 .../github.com/appscode/kutil/core/v1/rc.go | 89 ++++++
 .../appscode/kutil/core/v1/secret.go | 87 ++++++
 .../appscode/kutil/core/v1/service.go | 107 +++++++
 .../appscode/kutil/core/v1/serviceaccount.go | 78 +++++
 .../kutil/rbac/v1beta1/clusterrole.go | 78 +++++
.../kutil/rbac/v1beta1/clusterrolebinding.go | 78 +++++ .../appscode/kutil/rbac/v1beta1/kubernetes.go | 41 +++ .../appscode/kutil/rbac/v1beta1/role.go | 78 +++++ .../kutil/rbac/v1beta1/rolebinding.go | 78 +++++ .../kutil/tools/analytics/analytics.go | 4 +- vendor/github.com/appscode/mergo/LICENSE | 28 ++ vendor/github.com/appscode/mergo/doc.go | 44 +++ vendor/github.com/appscode/mergo/map.go | 156 ++++++++++ vendor/github.com/appscode/mergo/merge.go | 123 ++++++++ vendor/github.com/appscode/mergo/mergo.go | 90 ++++++ 24 files changed, 1999 insertions(+), 89 deletions(-) create mode 100644 vendor/github.com/appscode/kutil/core/v1/configmap.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/kubernetes.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/node.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/pod.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/pv.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/pvc.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/rc.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/secret.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/service.go create mode 100644 vendor/github.com/appscode/kutil/core/v1/serviceaccount.go create mode 100644 vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrole.go create mode 100644 vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/github.com/appscode/kutil/rbac/v1beta1/kubernetes.go create mode 100644 vendor/github.com/appscode/kutil/rbac/v1beta1/role.go create mode 100644 vendor/github.com/appscode/kutil/rbac/v1beta1/rolebinding.go create mode 100644 vendor/github.com/appscode/mergo/LICENSE create mode 100644 vendor/github.com/appscode/mergo/doc.go create mode 100644 vendor/github.com/appscode/mergo/map.go create mode 100644 vendor/github.com/appscode/mergo/merge.go create mode 100644 vendor/github.com/appscode/mergo/mergo.go diff --git a/glide.lock b/glide.lock index cd5557ec4..37e2b4d4d 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: a0bbe9041555b0f2f28bf12a06e143bef8b5de12e1a09976d1266859df1bb483 -updated: 2018-01-29T18:47:42.981402503+06:00 +updated: 2018-01-30T11:34:14.800706335+06:00 imports: - name: cloud.google.com/go version: fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f @@ -27,10 +27,14 @@ imports: subpackages: - api - name: github.com/appscode/kutil - version: 60ced7f055b34d512a84f65ca0a7d412ec3d6ef2 + version: a035b515d68bca793b2beacdf52e19a02f914017 subpackages: + - core/v1 - meta + - rbac/v1beta1 - tools/analytics +- name: github.com/appscode/mergo + version: e3000cb3d28c72b837601cac94debd91032d19fe - name: github.com/appscode/osm version: 8175dd853f6f3d4530b14018901c79140e305f3a subpackages: @@ -280,7 +284,7 @@ imports: - name: github.com/juju/ratelimit version: 5b9ff866471762aa2ab2dced63c9fb6f53921342 - name: github.com/kubedb/apimachinery - version: 455d1a753fcd939ecb9497ed4ab77567207c155c + version: 0de29010181b4b361fb210d5a85de7a4ff56e9df subpackages: - apis/kubedb - apis/kubedb/v1alpha1 @@ -290,32 +294,32 @@ imports: - pkg/storage - pkg/validator - name: github.com/kubedb/elasticsearch - version: b95239a26a35182cd906ca4d980a260eb65fd172 + version: cb4b4269abe35bf279ebb65f67d162840f2a198e subpackages: - pkg/docker - pkg/validator - name: github.com/kubedb/memcached - version: 7bbee4aa80e6c83bb828fd0019221944587df48d + version: 08191a9cdbd143cebcb343c969d50b6420e19542 subpackages: - pkg/docker - pkg/validator - name: github.com/kubedb/mongodb - 
version: a2c4668e219f336236fb7f02b896c4fc425bf36e + version: de8f30be6c914ead85d25596836e7fb5313356f5 subpackages: - pkg/docker - pkg/validator - name: github.com/kubedb/mysql - version: ebbfec2f507d142c146cd4749f1c376c49b536f1 + version: 6db2ae8d66b38c00106969bc801b247033e129dd subpackages: - pkg/docker - pkg/validator - name: github.com/kubedb/postgres - version: a5824238a43aca07f22382701364eb0c668cbed5 + version: d04e61ec820c48c076828471479d45ceee29ac9a subpackages: - pkg/docker - pkg/validator - name: github.com/kubedb/redis - version: 682c8cc57ab32fa22bdadcf951d921d817f6f68a + version: 56cc3e767e6714b07f84915e9177c138df299c3b subpackages: - pkg/docker - pkg/validator @@ -439,7 +443,7 @@ imports: - unicode/norm - width - name: google.golang.org/api - version: d7238a695cdc29affa1e76d5485eb2f72d30f8f8 + version: 7d0e2d350555821bef5a5b8aecf0d12cc1def633 subpackages: - gensupport - googleapi diff --git a/pkg/cmds/init.go b/pkg/cmds/init.go index 05fa5b831..d770c917f 100644 --- a/pkg/cmds/init.go +++ b/pkg/cmds/init.go @@ -223,6 +223,15 @@ func updateOperatorDeployment(cmd *cobra.Command, out, errOut io.Writer) error { return err } + if configureRBAC { + if err := EnsureRBACStuff(client, namespace, out); err != nil { + return err + } + deployment.Spec.Template.Spec.ServiceAccountName = ServiceAccountName + } else { + deployment.Spec.Template.Spec.ServiceAccountName = "" + } + containers := deployment.Spec.Template.Spec.Containers if len(containers) == 0 { fmt.Fprintln(errOut, fmt.Sprintf(`Invalid operator deployment "%v"`, operatorName)) @@ -248,15 +257,6 @@ func updateOperatorDeployment(cmd *cobra.Command, out, errOut io.Writer) error { deployment.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%v:%v", repository, version) - if configureRBAC { - if err := EnsureRBACStuff(client, namespace, out); err != nil { - return err - } - deployment.Spec.Template.Spec.ServiceAccountName = ServiceAccountName - } else { - deployment.Spec.Template.Spec.ServiceAccountName = "" - } - deployment.Spec.Template.Spec.Containers[0].Args = []string{ "run", fmt.Sprintf("--governing-service=%v", governingService), diff --git a/pkg/cmds/role.go b/pkg/cmds/role.go index 9deee48d7..10fb6c3d7 100644 --- a/pkg/cmds/role.go +++ b/pkg/cmds/role.go @@ -4,6 +4,9 @@ import ( "fmt" "io" + "github.com/appscode/kutil" + core_util "github.com/appscode/kutil/core/v1" + rbac_util "github.com/appscode/kutil/rbac/v1beta1" "github.com/kubedb/apimachinery/apis/kubedb" apps "k8s.io/api/apps/v1beta1" batch "k8s.io/api/batch/v1" @@ -11,7 +14,6 @@ import ( rbac "k8s.io/api/rbac/v1beta1" storage "k8s.io/api/storage/v1" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - kerr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -52,7 +54,7 @@ var policyRuleOperator = []rbac.PolicyRule{ { APIGroups: []string{batch.GroupName}, Resources: []string{"jobs"}, - Verbs: []string{"create", "delete", "get", "list"}, + Verbs: []string{"create", "delete", "get", "list", "watch"}, }, { APIGroups: []string{storage.GroupName}, @@ -97,50 +99,46 @@ var policyRuleOperator = []rbac.PolicyRule{ } func EnsureRBACStuff(client kubernetes.Interface, namespace string, out io.Writer) error { + name := ServiceAccountName + // Ensure ClusterRoles for operator - clusterRoleOperator, err := client.RbacV1beta1().ClusterRoles().Get(name, metav1.GetOptions{}) + cr, vt1, err := rbac_util.CreateOrPatchClusterRole( + client, + metav1.ObjectMeta{ + Name: name, + Namespace: 
namespace, + }, + func(in *rbac.ClusterRole) *rbac.ClusterRole { + in.Labels = core_util.UpsertMap(in.Labels, operatorLabel) + in.Rules = policyRuleOperator + return in + }, + ) if err != nil { - if !kerr.IsNotFound(err) { - return err - } - // Create new one - role := &rbac.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: operatorLabel, - }, - Rules: policyRuleOperator, - } - if _, err := client.RbacV1beta1().ClusterRoles().Create(role); err != nil { - return err - } - fmt.Fprintln(out, "Successfully created cluster role.") - } else { - // Update existing one - clusterRoleOperator.Rules = policyRuleOperator - if _, err := client.RbacV1beta1().ClusterRoles().Update(clusterRoleOperator); err != nil { - return err - } - fmt.Fprintln(out, "Successfully updated cluster role.") + return err + } + if vt1 != kutil.VerbUnchanged { + fmt.Fprintf(out, `ClusterRole "%s" successfully %v`, cr.Name, vt1) } // Ensure ServiceAccounts - if _, err := client.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{}); err != nil { - if !kerr.IsNotFound(err) { - return err - } - sa := &core.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: operatorLabel, - }, - } - if _, err := client.CoreV1().ServiceAccounts(namespace).Create(sa); err != nil { - return err - } - fmt.Fprintln(out, "Successfully created service account.") + sa, vt2, err := core_util.CreateOrPatchServiceAccount( + client, + metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + func(in *core.ServiceAccount) *core.ServiceAccount { + in.Labels = core_util.UpsertMap(in.Labels, operatorLabel) + return in + }, + ) + if err != nil { + return err + } + if vt2 != kutil.VerbUnchanged { + fmt.Fprintf(out, `ServiceAccount "%s" successfully %v`, sa.Name, vt2) } var roleBindingRef = rbac.RoleRef{ @@ -157,34 +155,25 @@ func EnsureRBACStuff(client kubernetes.Interface, namespace string, out io.Write } // Ensure ClusterRoleBindings - roleBinding, err := client.RbacV1beta1().ClusterRoleBindings().Get(name, metav1.GetOptions{}) - if err != nil { - if !kerr.IsNotFound(err) { - return err - } - - roleBinding := &rbac.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: operatorLabel, - }, - RoleRef: roleBindingRef, - Subjects: roleBindingSubjects, - } + crb, vt3, err := rbac_util.CreateOrPatchClusterRoleBinding( + client, + metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + func(in *rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding { + in.Labels = core_util.UpsertMap(in.Labels, operatorLabel) + in.RoleRef = roleBindingRef + in.Subjects = roleBindingSubjects - if _, err := client.RbacV1beta1().ClusterRoleBindings().Create(roleBinding); err != nil { - return err - } - fmt.Fprintln(out, "Successfully created cluster role bindings.") - } else { - roleBinding.RoleRef = roleBindingRef - roleBinding.Subjects = roleBindingSubjects - if _, err := client.RbacV1beta1().ClusterRoleBindings().Update(roleBinding); err != nil { - return err - } - fmt.Fprintln(out, "Successfully updated cluster role bindings.") + return in + }, + ) + if err != nil { + return err + } + if vt3 != kutil.VerbUnchanged { + fmt.Fprintf(out, `ClusterRoleBinding "%s" successfully %v`, crb.Name, vt3) } - return nil } diff --git a/vendor/github.com/appscode/kutil/core/v1/configmap.go b/vendor/github.com/appscode/kutil/core/v1/configmap.go new file mode 100644 index 000000000..1142a43bc --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/configmap.go @@ 
-0,0 +1,78 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchConfigMap(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ConfigMap) *core.ConfigMap) (*core.ConfigMap, kutil.VerbType, error) { + cur, err := c.CoreV1().ConfigMaps(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating ConfigMap %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().ConfigMaps(meta.Namespace).Create(transform(&core.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchConfigMap(c, cur, transform) +} + +func PatchConfigMap(c kubernetes.Interface, cur *core.ConfigMap, transform func(*core.ConfigMap) *core.ConfigMap) (*core.ConfigMap, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.ConfigMap{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching ConfigMap %s/%s with %s", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().ConfigMaps(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateConfigMap(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ConfigMap) *core.ConfigMap) (result *core.ConfigMap, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().ConfigMaps(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().ConfigMaps(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update ConfigMap %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update ConfigMap %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/core/v1/kubernetes.go b/vendor/github.com/appscode/kutil/core/v1/kubernetes.go new file mode 100644 index 000000000..eb4e59e43 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/kubernetes.go @@ -0,0 +1,271 @@ +package v1 + +import ( + "errors" + + "github.com/appscode/go/types" + "github.com/appscode/kutil/meta" + "github.com/appscode/mergo" + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func GetGroupVersionKind(v interface{}) schema.GroupVersionKind { + return core.SchemeGroupVersion.WithKind(meta.GetKind(v)) +} + +func AssignTypeKind(v 
interface{}) error { + _, err := conversion.EnforcePtr(v) + if err != nil { + return err + } + + switch u := v.(type) { + case *core.Pod: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.ReplicationController: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.ConfigMap: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Secret: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Service: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.PersistentVolumeClaim: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.PersistentVolume: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Node: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.ServiceAccount: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Namespace: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Endpoints: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.ComponentStatus: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.LimitRange: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + case *core.Event: + u.APIVersion = core.SchemeGroupVersion.String() + u.Kind = meta.GetKind(v) + return nil + } + return errors.New("unknown api object type") +} + +func RemoveNextInitializer(m metav1.ObjectMeta) metav1.ObjectMeta { + if m.GetInitializers() != nil { + pendingInitializers := m.GetInitializers().Pending + // Remove self from the list of pending Initializers while preserving ordering. + if len(pendingInitializers) == 1 { + m.Initializers = nil + } else { + m.Initializers.Pending = append(pendingInitializers[:0], pendingInitializers[1:]...) + } + } + return m +} + +func AddFinalizer(m metav1.ObjectMeta, finalizer string) metav1.ObjectMeta { + for _, name := range m.Finalizers { + if name == finalizer { + return m + } + } + m.Finalizers = append(m.Finalizers, finalizer) + return m +} + +func HasFinalizer(m metav1.ObjectMeta, finalizer string) bool { + for _, name := range m.Finalizers { + if name == finalizer { + return true + } + } + return false +} + +func RemoveFinalizer(m metav1.ObjectMeta, finalizer string) metav1.ObjectMeta { + // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating + r := m.Finalizers[:0] + for _, name := range m.Finalizers { + if name != finalizer { + r = append(r, name) + } + } + m.Finalizers = r + return m +} + +func EnsureContainerDeleted(containers []core.Container, name string) []core.Container { + for i, c := range containers { + if c.Name == name { + return append(containers[:i], containers[i+1:]...) 
+ } + } + return containers +} + +func UpsertContainer(containers []core.Container, upsert core.Container) []core.Container { + for i, container := range containers { + if container.Name == upsert.Name { + err := mergo.MergeWithOverwrite(&container, upsert) + if err != nil { + panic(err) + } + containers[i] = container + return containers + } + } + return append(containers, upsert) +} + +func UpsertVolume(volumes []core.Volume, nv core.Volume) []core.Volume { + for i, vol := range volumes { + if vol.Name == nv.Name { + volumes[i] = nv + return volumes + } + } + return append(volumes, nv) +} + +func UpsertVolumeClaim(volumeClaims []core.PersistentVolumeClaim, upsert core.PersistentVolumeClaim) []core.PersistentVolumeClaim { + for i, vc := range volumeClaims { + if vc.Name == upsert.Name { + volumeClaims[i] = upsert + return volumeClaims + } + } + return append(volumeClaims, upsert) +} + +func EnsureVolumeDeleted(volumes []core.Volume, name string) []core.Volume { + for i, v := range volumes { + if v.Name == name { + return append(volumes[:i], volumes[i+1:]...) + } + } + return volumes +} + +func UpsertVolumeMount(mounts []core.VolumeMount, nv core.VolumeMount) []core.VolumeMount { + for i, vol := range mounts { + if vol.Name == nv.Name { + mounts[i] = nv + return mounts + } + } + return append(mounts, nv) +} + +func EnsureVolumeMountDeleted(mounts []core.VolumeMount, name string) []core.VolumeMount { + for i, v := range mounts { + if v.Name == name { + return append(mounts[:i], mounts[i+1:]...) + } + } + return mounts +} + +func UpsertEnvVars(vars []core.EnvVar, nv ...core.EnvVar) []core.EnvVar { + upsert := func(env core.EnvVar) { + for i, v := range vars { + if v.Name == env.Name { + vars[i] = env + return + } + } + vars = append(vars, env) + } + + for _, env := range nv { + upsert(env) + } + return vars +} + +func EnsureEnvVarDeleted(vars []core.EnvVar, name string) []core.EnvVar { + for i, v := range vars { + if v.Name == name { + return append(vars[:i], vars[i+1:]...) 
+ } + } + return vars +} + +func UpsertMap(maps, upsert map[string]string) map[string]string { + if maps == nil { + maps = make(map[string]string) + } + for k, v := range upsert { + maps[k] = v + } + return maps +} + +func MergeLocalObjectReferences(old, new []core.LocalObjectReference) []core.LocalObjectReference { + m := make(map[string]core.LocalObjectReference) + for _, ref := range old { + m[ref.Name] = ref + } + for _, ref := range new { + m[ref.Name] = ref + } + + result := make([]core.LocalObjectReference, 0, len(m)) + for _, ref := range m { + result = append(result, ref) + } + return result +} + +func EnsureOwnerReference(meta metav1.ObjectMeta, owner *core.ObjectReference) metav1.ObjectMeta { + fi := -1 + for i, ref := range meta.OwnerReferences { + if ref.Kind == owner.Kind && ref.Name == owner.Name { + fi = i + break + } + } + if fi == -1 { + meta.OwnerReferences = append(meta.OwnerReferences, metav1.OwnerReference{}) + fi = len(meta.OwnerReferences) - 1 + } + meta.OwnerReferences[fi].APIVersion = owner.APIVersion + meta.OwnerReferences[fi].Kind = owner.Kind + meta.OwnerReferences[fi].Name = owner.Name + meta.OwnerReferences[fi].UID = owner.UID + meta.OwnerReferences[fi].BlockOwnerDeletion = types.TrueP() + return meta +} diff --git a/vendor/github.com/appscode/kutil/core/v1/node.go b/vendor/github.com/appscode/kutil/core/v1/node.go new file mode 100644 index 000000000..594b80359 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/node.go @@ -0,0 +1,96 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchNode(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Node) *core.Node) (*core.Node, kutil.VerbType, error) { + cur, err := c.CoreV1().Nodes().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating Node %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().Nodes().Create(transform(&core.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchNode(c, cur, transform) +} + +func PatchNode(c kubernetes.Interface, cur *core.Node, transform func(*core.Node) *core.Node) (*core.Node, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.Node{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching Node %s with %s", cur.Name, string(patch)) + out, err := c.CoreV1().Nodes().Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateNode(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Node) *core.Node) (result *core.Node, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, 
func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().Nodes().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().Nodes().Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update Node %s due to %v.", attempt, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update Node %s after %d attempts due to %v", meta.Name, attempt, err) + } + return +} + +// NodeReady returns whether a node is ready. +func NodeReady(node core.Node) bool { + for _, cond := range node.Status.Conditions { + if cond.Type != core.NodeReady { + continue + } + return cond.Status == core.ConditionTrue + } + return false +} + +// IsMaster returns whether a node is a master. +func IsMaster(node core.Node) bool { + _, ok17 := node.Labels["node-role.kubernetes.io/master"] + role16, ok16 := node.Labels["kubernetes.io/role"] + return ok17 || (ok16 && role16 == "master") +} diff --git a/vendor/github.com/appscode/kutil/core/v1/pod.go b/vendor/github.com/appscode/kutil/core/v1/pod.go new file mode 100644 index 000000000..3b1054fe2 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/pod.go @@ -0,0 +1,163 @@ +package v1 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchPod(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Pod) *core.Pod) (*core.Pod, kutil.VerbType, error) { + cur, err := c.CoreV1().Pods(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating Pod %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().Pods(meta.Namespace).Create(transform(&core.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchPod(c, cur, transform) +} + +func PatchPod(c kubernetes.Interface, cur *core.Pod, transform func(*core.Pod) *core.Pod) (*core.Pod, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.Pod{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching Pod %s/%s with %s", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().Pods(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdatePod(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Pod) *core.Pod) (result *core.Pod, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().Pods(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, 
e2 = c.CoreV1().Pods(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update Pod %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update Pod %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} + +// ref: https://github.com/coreos/prometheus-operator/blob/c79166fcff3dae7bb8bc1e6bddc81837c2d97c04/pkg/k8sutil/k8sutil.go#L64 +// PodRunningAndReady returns whether a pod is running and each container has +// passed it's ready state. +func PodRunningAndReady(pod core.Pod) (bool, error) { + switch pod.Status.Phase { + case core.PodFailed, core.PodSucceeded: + return false, errors.New("pod completed") + case core.PodRunning: + for _, cond := range pod.Status.Conditions { + if cond.Type != core.PodReady { + continue + } + return cond.Status == core.ConditionTrue, nil + } + return false, errors.New("pod ready condition not found") + } + return false, nil +} + +func RestartPods(kubeClient kubernetes.Interface, namespace string, selector *metav1.LabelSelector) error { + r, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return err + } + return kubeClient.CoreV1().Pods(namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{ + LabelSelector: r.String(), + }) +} + +func WaitUntilPodRunning(kubeClient kubernetes.Interface, meta metav1.ObjectMeta) error { + return wait.PollImmediate(kutil.RetryInterval, kutil.ReadinessTimeout, func() (bool, error) { + if pod, err := kubeClient.CoreV1().Pods(meta.Namespace).Get(meta.Name, metav1.GetOptions{}); err == nil { + runningAndReady, _ := PodRunningAndReady(*pod) + return runningAndReady, nil + } + return false, nil + }) +} + +func WaitUntilPodRunningBySelector(kubeClient kubernetes.Interface, namespace string, selector *metav1.LabelSelector, count int) error { + r, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return err + } + + return wait.PollImmediate(kutil.RetryInterval, kutil.ReadinessTimeout, func() (bool, error) { + podList, err := kubeClient.CoreV1().Pods(namespace).List(metav1.ListOptions{ + LabelSelector: r.String(), + }) + if err != nil { + return false, nil + } + + if len(podList.Items) != count { + return false, nil + } + + for _, pod := range podList.Items { + runningAndReady, _ := PodRunningAndReady(pod) + if !runningAndReady { + return false, nil + } + } + return true, nil + }) +} + +func WaitUntilPodDeletedBySelector(kubeClient kubernetes.Interface, namespace string, selector *metav1.LabelSelector) error { + r, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return err + } + + return wait.PollImmediate(kutil.RetryInterval, kutil.ReadinessTimeout, func() (bool, error) { + podList, err := kubeClient.CoreV1().Pods(namespace).List(metav1.ListOptions{ + LabelSelector: r.String(), + }) + if err != nil { + return false, nil + } + return len(podList.Items) == 0, nil + }) +} diff --git a/vendor/github.com/appscode/kutil/core/v1/pv.go b/vendor/github.com/appscode/kutil/core/v1/pv.go new file mode 100644 index 000000000..192e124ef --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/pv.go @@ -0,0 +1,78 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + 
"k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchPV(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.PersistentVolume) *core.PersistentVolume) (*core.PersistentVolume, kutil.VerbType, error) { + cur, err := c.CoreV1().PersistentVolumes().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating PersistentVolume %s.", meta.Name) + out, err := c.CoreV1().PersistentVolumes().Create(transform(&core.PersistentVolume{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolume", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchPV(c, cur, transform) +} + +func PatchPV(c kubernetes.Interface, cur *core.PersistentVolume, transform func(*core.PersistentVolume) *core.PersistentVolume) (*core.PersistentVolume, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.PersistentVolume{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching PersistentVolume %s with %s.", cur.Name, string(patch)) + out, err := c.CoreV1().PersistentVolumes().Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdatePV(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.PersistentVolume) *core.PersistentVolume) (result *core.PersistentVolume, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().PersistentVolumes().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().PersistentVolumes().Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update PersistentVolume %s due to %v.", attempt, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update PersistentVolume %s after %d attempts due to %v", meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/core/v1/pvc.go b/vendor/github.com/appscode/kutil/core/v1/pvc.go new file mode 100644 index 000000000..37f8c4be2 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/pvc.go @@ -0,0 +1,78 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchPVC(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.PersistentVolumeClaim) *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, kutil.VerbType, error) { + cur, err := c.CoreV1().PersistentVolumeClaims(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating PersistentVolumeClaim %s/%s.", 
meta.Namespace, meta.Name) + out, err := c.CoreV1().PersistentVolumeClaims(meta.Namespace).Create(transform(&core.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + Kind: "PersistentVolumeClaim", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchPVC(c, cur, transform) +} + +func PatchPVC(c kubernetes.Interface, cur *core.PersistentVolumeClaim, transform func(*core.PersistentVolumeClaim) *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.PersistentVolumeClaim{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching PersistentVolumeClaim %s/%s with %s.", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().PersistentVolumeClaims(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdatePVC(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.PersistentVolumeClaim) *core.PersistentVolumeClaim) (result *core.PersistentVolumeClaim, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().PersistentVolumeClaims(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().PersistentVolumeClaims(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update PersistentVolumeClaim %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update PersistentVolumeClaim %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/core/v1/rc.go b/vendor/github.com/appscode/kutil/core/v1/rc.go new file mode 100644 index 000000000..07de7170e --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/rc.go @@ -0,0 +1,89 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + . 
"github.com/appscode/go/types" + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchRC(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ReplicationController) *core.ReplicationController) (*core.ReplicationController, kutil.VerbType, error) { + cur, err := c.CoreV1().ReplicationControllers(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating ReplicationController %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().ReplicationControllers(meta.Namespace).Create(transform(&core.ReplicationController{ + TypeMeta: metav1.TypeMeta{ + Kind: "ReplicationController", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchRC(c, cur, transform) +} + +func PatchRC(c kubernetes.Interface, cur *core.ReplicationController, transform func(*core.ReplicationController) *core.ReplicationController) (*core.ReplicationController, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.ReplicationController{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching ReplicationController %s/%s with %s.", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().ReplicationControllers(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateRC(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ReplicationController) *core.ReplicationController) (result *core.ReplicationController, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().ReplicationControllers(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().ReplicationControllers(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update ReplicationController %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update ReplicationController %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} + +func WaitUntilRCReady(c kubernetes.Interface, meta metav1.ObjectMeta) error { + return wait.PollImmediate(kutil.RetryInterval, kutil.ReadinessTimeout, func() (bool, error) { + if obj, err := c.CoreV1().ReplicationControllers(meta.Namespace).Get(meta.Name, metav1.GetOptions{}); err == nil { + return Int32(obj.Spec.Replicas) == obj.Status.ReadyReplicas, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/appscode/kutil/core/v1/secret.go b/vendor/github.com/appscode/kutil/core/v1/secret.go new file 
mode 100644 index 000000000..ffeee9155 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/secret.go @@ -0,0 +1,87 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchSecret(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Secret) *core.Secret) (*core.Secret, kutil.VerbType, error) { + cur, err := c.CoreV1().Secrets(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating Secret %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().Secrets(meta.Namespace).Create(transform(&core.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchSecret(c, cur, transform) +} + +func PatchSecret(c kubernetes.Interface, cur *core.Secret, transform func(*core.Secret) *core.Secret) (*core.Secret, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.Secret{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching Secret %s/%s", cur.Namespace, cur.Name) + out, err := c.CoreV1().Secrets(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateSecret(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Secret) *core.Secret) (result *core.Secret, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().Secrets(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().Secrets(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update Secret %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update Secret %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} + +func ObfuscateSecret(in core.Secret) *core.Secret { + data := make(map[string][]byte) + for k := range in.Data { + data[k] = []byte("-") + } + in.Data = data + return &in +} diff --git a/vendor/github.com/appscode/kutil/core/v1/service.go b/vendor/github.com/appscode/kutil/core/v1/service.go new file mode 100644 index 000000000..6a7cdfefe --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/service.go @@ -0,0 +1,107 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + 
"k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchService(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Service) *core.Service) (*core.Service, kutil.VerbType, error) { + cur, err := c.CoreV1().Services(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating Service %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().Services(meta.Namespace).Create(transform(&core.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchService(c, cur, transform) +} + +func PatchService(c kubernetes.Interface, cur *core.Service, transform func(*core.Service) *core.Service) (*core.Service, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.Service{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching Service %s/%s with %s.", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().Services(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateService(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.Service) *core.Service) (result *core.Service, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().Services(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().Services(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update Service %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update Service %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} + +func MergeServicePorts(cur, desired []core.ServicePort) []core.ServicePort { + if len(cur) == 0 { + return desired + } + + // ports + curPorts := make(map[int32]core.ServicePort) + for _, p := range cur { + curPorts[p.Port] = p + } + for i, dp := range desired { + cp, ok := curPorts[dp.Port] + + // svc port not found + if !ok { + continue + } + + if dp.NodePort == 0 { + dp.NodePort = cp.NodePort // avoid reassigning port + } + if dp.Protocol == "" { + dp.Protocol = cp.Protocol + } + desired[i] = dp + } + return desired +} diff --git a/vendor/github.com/appscode/kutil/core/v1/serviceaccount.go b/vendor/github.com/appscode/kutil/core/v1/serviceaccount.go new file mode 100644 index 000000000..1a64b94d1 --- /dev/null +++ b/vendor/github.com/appscode/kutil/core/v1/serviceaccount.go @@ -0,0 +1,78 @@ +package v1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + core "k8s.io/api/core/v1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchServiceAccount(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ServiceAccount) *core.ServiceAccount) (*core.ServiceAccount, kutil.VerbType, error) { + cur, err := c.CoreV1().ServiceAccounts(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating ServiceAccount %s/%s.", meta.Namespace, meta.Name) + out, err := c.CoreV1().ServiceAccounts(meta.Namespace).Create(transform(&core.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + Kind: "ServiceAccount", + APIVersion: core.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchServiceAccount(c, cur, transform) +} + +func PatchServiceAccount(c kubernetes.Interface, cur *core.ServiceAccount, transform func(*core.ServiceAccount) *core.ServiceAccount) (*core.ServiceAccount, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, core.ServiceAccount{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching ServiceAccount %s/%s with %s", cur.Namespace, cur.Name, string(patch)) + out, err := c.CoreV1().ServiceAccounts(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateServiceAccount(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*core.ServiceAccount) *core.ServiceAccount) (result *core.ServiceAccount, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.CoreV1().ServiceAccounts(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.CoreV1().ServiceAccounts(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update ServiceAccount %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update ServiceAccount %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrole.go b/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrole.go new file mode 100644 index 000000000..93c136d9f --- /dev/null +++ b/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrole.go @@ -0,0 +1,78 @@ +package v1beta1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + rbac "k8s.io/api/rbac/v1beta1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchClusterRole(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.ClusterRole) *rbac.ClusterRole) (*rbac.ClusterRole, 
kutil.VerbType, error) { + cur, err := c.RbacV1beta1().ClusterRoles().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating ClusterRole %s.", meta.Name) + out, err := c.RbacV1beta1().ClusterRoles().Create(transform(&rbac.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: rbac.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchClusterRole(c, cur, transform) +} + +func PatchClusterRole(c kubernetes.Interface, cur *rbac.ClusterRole, transform func(*rbac.ClusterRole) *rbac.ClusterRole) (*rbac.ClusterRole, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, rbac.ClusterRole{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching ClusterRole %s with %s.", cur.Name, string(patch)) + out, err := c.RbacV1beta1().ClusterRoles().Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateClusterRole(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.ClusterRole) *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.RbacV1beta1().ClusterRoles().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.RbacV1beta1().ClusterRoles().Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update ClusterRole %s due to %v.", attempt, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update ClusterRole %s after %d attempts due to %v", meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrolebinding.go b/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 000000000..87735ff23 --- /dev/null +++ b/vendor/github.com/appscode/kutil/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,78 @@ +package v1beta1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + rbac "k8s.io/api/rbac/v1beta1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchClusterRoleBinding(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, kutil.VerbType, error) { + cur, err := c.RbacV1beta1().ClusterRoleBindings().Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating ClusterRoleBinding %s.", meta.Name) + out, err := c.RbacV1beta1().ClusterRoleBindings().Create(transform(&rbac.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: rbac.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, 
kutil.VerbCreated, err
+	} else if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+	return PatchClusterRoleBinding(c, cur, transform)
+}
+
+func PatchClusterRoleBinding(c kubernetes.Interface, cur *rbac.ClusterRoleBinding, transform func(*rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, kutil.VerbType, error) {
+	curJson, err := json.Marshal(cur)
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+
+	modJson, err := json.Marshal(transform(cur.DeepCopy()))
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+
+	patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, rbac.ClusterRoleBinding{})
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+	if len(patch) == 0 || string(patch) == "{}" {
+		return cur, kutil.VerbUnchanged, nil
+	}
+	glog.V(3).Infof("Patching ClusterRoleBinding %s with %s.", cur.Name, string(patch))
+	out, err := c.RbacV1beta1().ClusterRoleBindings().Patch(cur.Name, types.StrategicMergePatchType, patch)
+	return out, kutil.VerbPatched, err
+}
+
+func TryUpdateClusterRoleBinding(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.ClusterRoleBinding) *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) {
+	attempt := 0
+	err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) {
+		attempt++
+		cur, e2 := c.RbacV1beta1().ClusterRoleBindings().Get(meta.Name, metav1.GetOptions{})
+		if kerr.IsNotFound(e2) {
+			return false, e2
+		} else if e2 == nil {
+			result, e2 = c.RbacV1beta1().ClusterRoleBindings().Update(transform(cur.DeepCopy()))
+			return e2 == nil, nil
+		}
+		glog.Errorf("Attempt %d failed to update ClusterRoleBinding %s due to %v.", attempt, cur.Name, e2)
+		return false, nil
+	})
+
+	if err != nil {
+		err = fmt.Errorf("failed to update ClusterRoleBinding %s after %d attempts due to %v", meta.Name, attempt, err)
+	}
+	return
+}
diff --git a/vendor/github.com/appscode/kutil/rbac/v1beta1/kubernetes.go b/vendor/github.com/appscode/kutil/rbac/v1beta1/kubernetes.go
new file mode 100644
index 000000000..cc74aa73c
--- /dev/null
+++ b/vendor/github.com/appscode/kutil/rbac/v1beta1/kubernetes.go
@@ -0,0 +1,41 @@
+package v1beta1
+
+import (
+	"errors"
+
+	"github.com/appscode/kutil/meta"
+	rbac "k8s.io/api/rbac/v1beta1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+func GetGroupVersionKind(v interface{}) schema.GroupVersionKind {
+	return rbac.SchemeGroupVersion.WithKind(meta.GetKind(v))
+}
+
+func AssignTypeKind(v interface{}) error {
+	_, err := conversion.EnforcePtr(v)
+	if err != nil {
+		return err
+	}
+
+	switch u := v.(type) {
+	case *rbac.Role:
+		u.APIVersion = rbac.SchemeGroupVersion.String()
+		u.Kind = meta.GetKind(v)
+		return nil
+	case *rbac.RoleBinding:
+		u.APIVersion = rbac.SchemeGroupVersion.String()
+		u.Kind = meta.GetKind(v)
+		return nil
+	case *rbac.ClusterRole:
+		u.APIVersion = rbac.SchemeGroupVersion.String()
+		u.Kind = meta.GetKind(v)
+		return nil
+	case *rbac.ClusterRoleBinding:
+		u.APIVersion = rbac.SchemeGroupVersion.String()
+		u.Kind = meta.GetKind(v)
+		return nil
+	}
+	return errors.New("unknown api object type")
+}
diff --git a/vendor/github.com/appscode/kutil/rbac/v1beta1/role.go b/vendor/github.com/appscode/kutil/rbac/v1beta1/role.go
new file mode 100644
index 000000000..692485698
--- /dev/null
+++ b/vendor/github.com/appscode/kutil/rbac/v1beta1/role.go
@@ -0,0 +1,78 @@
+package v1beta1
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/appscode/kutil"
+	"github.com/golang/glog"
+	
rbac "k8s.io/api/rbac/v1beta1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchRole(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.Role) *rbac.Role) (*rbac.Role, kutil.VerbType, error) { + cur, err := c.RbacV1beta1().Roles(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(err) { + glog.V(3).Infof("Creating Role %s/%s.", meta.Namespace, meta.Name) + out, err := c.RbacV1beta1().Roles(meta.Namespace).Create(transform(&rbac.Role{ + TypeMeta: metav1.TypeMeta{ + Kind: "Role", + APIVersion: rbac.SchemeGroupVersion.String(), + }, + ObjectMeta: meta, + })) + return out, kutil.VerbCreated, err + } else if err != nil { + return nil, kutil.VerbUnchanged, err + } + return PatchRole(c, cur, transform) +} + +func PatchRole(c kubernetes.Interface, cur *rbac.Role, transform func(*rbac.Role) *rbac.Role) (*rbac.Role, kutil.VerbType, error) { + curJson, err := json.Marshal(cur) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + modJson, err := json.Marshal(transform(cur.DeepCopy())) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + + patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, rbac.Role{}) + if err != nil { + return nil, kutil.VerbUnchanged, err + } + if len(patch) == 0 || string(patch) == "{}" { + return cur, kutil.VerbUnchanged, nil + } + glog.V(3).Infof("Patching Role %s/%s with %s.", cur.Namespace, cur.Name, string(patch)) + out, err := c.RbacV1beta1().Roles(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch) + return out, kutil.VerbPatched, err +} + +func TryUpdateRole(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.Role) *rbac.Role) (result *rbac.Role, err error) { + attempt := 0 + err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) { + attempt++ + cur, e2 := c.RbacV1beta1().Roles(meta.Namespace).Get(meta.Name, metav1.GetOptions{}) + if kerr.IsNotFound(e2) { + return false, e2 + } else if e2 == nil { + result, e2 = c.RbacV1beta1().Roles(cur.Namespace).Update(transform(cur.DeepCopy())) + return e2 == nil, nil + } + glog.Errorf("Attempt %d failed to update Role %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2) + return false, nil + }) + + if err != nil { + err = fmt.Errorf("failed to update Role %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err) + } + return +} diff --git a/vendor/github.com/appscode/kutil/rbac/v1beta1/rolebinding.go b/vendor/github.com/appscode/kutil/rbac/v1beta1/rolebinding.go new file mode 100644 index 000000000..6589237d6 --- /dev/null +++ b/vendor/github.com/appscode/kutil/rbac/v1beta1/rolebinding.go @@ -0,0 +1,78 @@ +package v1beta1 + +import ( + "encoding/json" + "fmt" + + "github.com/appscode/kutil" + "github.com/golang/glog" + rbac "k8s.io/api/rbac/v1beta1" + kerr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +func CreateOrPatchRoleBinding(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.RoleBinding) *rbac.RoleBinding) (*rbac.RoleBinding, kutil.VerbType, error) { + cur, err := c.RbacV1beta1().RoleBindings(meta.Namespace).Get(meta.Name, 
metav1.GetOptions{})
+	if kerr.IsNotFound(err) {
+		glog.V(3).Infof("Creating RoleBinding %s/%s.", meta.Namespace, meta.Name)
+		out, err := c.RbacV1beta1().RoleBindings(meta.Namespace).Create(transform(&rbac.RoleBinding{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "RoleBinding",
+				APIVersion: rbac.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: meta,
+		}))
+		return out, kutil.VerbCreated, err
+	} else if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+	return PatchRoleBinding(c, cur, transform)
+}
+
+func PatchRoleBinding(c kubernetes.Interface, cur *rbac.RoleBinding, transform func(*rbac.RoleBinding) *rbac.RoleBinding) (*rbac.RoleBinding, kutil.VerbType, error) {
+	curJson, err := json.Marshal(cur)
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+
+	modJson, err := json.Marshal(transform(cur.DeepCopy()))
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+
+	patch, err := strategicpatch.CreateTwoWayMergePatch(curJson, modJson, rbac.RoleBinding{})
+	if err != nil {
+		return nil, kutil.VerbUnchanged, err
+	}
+	if len(patch) == 0 || string(patch) == "{}" {
+		return cur, kutil.VerbUnchanged, nil
+	}
+	glog.V(3).Infof("Patching RoleBinding %s/%s with %s.", cur.Namespace, cur.Name, string(patch))
+	out, err := c.RbacV1beta1().RoleBindings(cur.Namespace).Patch(cur.Name, types.StrategicMergePatchType, patch)
+	return out, kutil.VerbPatched, err
+}
+
+func TryUpdateRoleBinding(c kubernetes.Interface, meta metav1.ObjectMeta, transform func(*rbac.RoleBinding) *rbac.RoleBinding) (result *rbac.RoleBinding, err error) {
+	attempt := 0
+	err = wait.PollImmediate(kutil.RetryInterval, kutil.RetryTimeout, func() (bool, error) {
+		attempt++
+		cur, e2 := c.RbacV1beta1().RoleBindings(meta.Namespace).Get(meta.Name, metav1.GetOptions{})
+		if kerr.IsNotFound(e2) {
+			return false, e2
+		} else if e2 == nil {
+			result, e2 = c.RbacV1beta1().RoleBindings(cur.Namespace).Update(transform(cur.DeepCopy()))
+			return e2 == nil, nil
+		}
+		glog.Errorf("Attempt %d failed to update RoleBinding %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
+		return false, nil
+	})
+
+	if err != nil {
+		err = fmt.Errorf("failed to update RoleBinding %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
+	}
+	return
+}
diff --git a/vendor/github.com/appscode/kutil/tools/analytics/analytics.go b/vendor/github.com/appscode/kutil/tools/analytics/analytics.go
index 4145812ed..a72015232 100644
--- a/vendor/github.com/appscode/kutil/tools/analytics/analytics.go
+++ b/vendor/github.com/appscode/kutil/tools/analytics/analytics.go
@@ -47,9 +47,7 @@ func ClientID() string {
 		return "$k8s$newforconfig"
 	}
 	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{
-		LabelSelector: labels.SelectorFromSet(map[string]string{
-			"node-role.kubernetes.io/master": "",
-		}).String(),
+		LabelSelector: "node-role.kubernetes.io/master",
 	})
 	if err != nil {
 		return reasonForError(err)
diff --git a/vendor/github.com/appscode/mergo/LICENSE b/vendor/github.com/appscode/mergo/LICENSE
new file mode 100644
index 000000000..686680298
--- /dev/null
+++ b/vendor/github.com/appscode/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/appscode/mergo/doc.go b/vendor/github.com/appscode/mergo/doc.go new file mode 100644 index 000000000..6e9aa7baf --- /dev/null +++ b/vendor/github.com/appscode/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/appscode/mergo/map.go b/vendor/github.com/appscode/mergo/map.go new file mode 100644 index 000000000..8e8c4ba8e --- /dev/null +++ b/vendor/github.com/appscode/mergo/map.go @@ -0,0 +1,156 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. 
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}) error { + return _map(dst, src, false) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +func MapWithOverwrite(dst, src interface{}) error { + return _map(dst, src, true) +} + +func _map(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. 
+ if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/appscode/mergo/merge.go b/vendor/github.com/appscode/mergo/merge.go new file mode 100644 index 000000000..513774f4c --- /dev/null +++ b/vendor/github.com/appscode/mergo/merge.go @@ -0,0 +1,123 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "reflect" +) + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + switch dst.Kind() { + case reflect.Struct: + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { + return + } + } + case reflect.Map: + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } + } + if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } else if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. 
+func Merge(dst, src interface{}) error { + return merge(dst, src, false) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +func MergeWithOverwrite(dst, src interface{}) error { + return merge(dst, src, true) +} + +func merge(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/appscode/mergo/mergo.go b/vendor/github.com/appscode/mergo/mergo.go new file mode 100644 index 000000000..f8a0991ec --- /dev/null +++ b/vendor/github.com/appscode/mergo/mergo.go @@ -0,0 +1,90 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} From e3af579b97beddca2544a1ee54085ea1aa3c1e18 Mon Sep 17 00:00:00 2001 From: shahriar Date: Tue, 30 Jan 2018 11:49:57 +0600 Subject: [PATCH 4/6] added-all --- pkg/cmds/role.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/cmds/role.go b/pkg/cmds/role.go index 10fb6c3d7..15dd3ab89 100644 --- a/pkg/cmds/role.go +++ b/pkg/cmds/role.go @@ -38,14 +38,9 @@ var policyRuleOperator = []rbac.PolicyRule{ }, { APIGroups: []string{core.GroupName}, - Resources: []string{"secrets"}, + Resources: []string{"secrets", "serviceaccounts"}, Verbs: []string{"create", "delete", "get", "patch"}, }, - { - APIGroups: []string{core.GroupName}, - Resources: []string{"serviceaccounts"}, - Verbs: []string{"create", "delete", "get"}, - }, { APIGroups: []string{apps.GroupName}, Resources: []string{"deployments", "statefulsets"}, From fa14d6b2d36b618f470c2ab53de1f528ecd77bc0 Mon Sep 17 00:00:00 2001 From: shahriar Date: Tue, 30 Jan 2018 11:59:25 +0600 Subject: [PATCH 5/6] fixed --- glide.lock | 21 +- vendor/github.com/go-openapi/spec/bindata.go | 26 +- vendor/github.com/go-openapi/spec/expander.go | 562 ++++----------- vendor/github.com/go-openapi/spec/header.go | 30 - vendor/github.com/go-openapi/spec/items.go | 22 +- .../github.com/go-openapi/spec/parameter.go | 6 +- vendor/github.com/go-openapi/spec/ref.go | 23 +- vendor/github.com/go-openapi/spec/response.go | 23 +- .../github.com/go-openapi/spec/responses.go | 2 +- vendor/github.com/go-openapi/spec/schema.go | 6 +- vendor/github.com/go-openapi/spec/spec.go | 11 +- vendor/github.com/go-openapi/spec/swagger.go | 4 +- .../google.golang.org/appengine/appengine.go | 5 +- .../appengine/internal/api.go | 182 ++--- .../appengine/internal/api_classic.go | 16 +- .../appengine/internal/api_common.go | 39 +- .../appengine/internal/api_pre17.go | 682 ++++++++++++++++++ .../appengine/internal/identity_classic.go | 48 +- .../appengine/internal/identity_vm.go | 6 +- .../appengine/internal/main_vm.go | 6 +- vendor/gopkg.in/yaml.v2/LICENSE | 389 +++++----- vendor/gopkg.in/yaml.v2/decode.go | 10 +- vendor/gopkg.in/yaml.v2/emitterc.go | 15 +- vendor/gopkg.in/yaml.v2/parserc.go | 1 + vendor/gopkg.in/yaml.v2/readerc.go | 7 +- vendor/gopkg.in/yaml.v2/resolve.go | 11 +- vendor/gopkg.in/yaml.v2/scannerc.go | 13 +- vendor/gopkg.in/yaml.v2/yaml.go | 17 +- vendor/gopkg.in/yaml.v2/yamlh.go | 2 +- 29 files changed, 1294 insertions(+), 891 deletions(-) create mode 100644 vendor/google.golang.org/appengine/internal/api_pre17.go diff --git a/glide.lock b/glide.lock index ecb41ede9..9f99692b1 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,5 @@ -<<<<<<< HEAD -hash: a0bbe9041555b0f2f28bf12a06e143bef8b5de12e1a09976d1266859df1bb483 -updated: 2018-01-30T11:34:14.800706335+06:00 -======= hash: 524d47ba2b9ae54115ade7c896a4fb9ca17c77d3db633fe10daf87a9c35f8df7 -updated: 2018-01-29T08:35:27.263987742-08:00 ->>>>>>> ae620b058022eab459a09fd6305184915f02829f +updated: 2018-01-30T11:57:41.570285242+06:00 imports: - name: cloud.google.com/go version: fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f @@ -12,7 +7,7 @@ imports: - compute/metadata - internal - name: github.com/appscode/go - version: 99dbf420807d9459b63d1d0bf702537bb7aa976f + version: aee9dd25c6354457ed532ae0d463e54e53151ad8 subpackages: - analytics - context @@ -188,7 +183,7 @@ imports: - name: github.com/go-openapi/jsonreference version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 - name: github.com/go-openapi/spec - version: 
7abd5745472fff5eb3685386d5fb8bf38683154d + version: 6aced65f8501fe1217321abf0749d354824ba2ff - name: github.com/go-openapi/swag version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/gogo/protobuf @@ -263,7 +258,7 @@ imports: subpackages: - diskcache - name: github.com/hashicorp/go-version - version: 53932f80ddea12bea96be074f9fb2dc545806aba + version: 40efc0a21ca93e3bfbddefd4b5309a3e79d0aeb7 repo: https://github.com/appscode/go-version.git vcs: git - name: github.com/hashicorp/golang-lru @@ -299,11 +294,7 @@ imports: - pkg/storage - pkg/validator - name: github.com/kubedb/elasticsearch -<<<<<<< HEAD - version: cb4b4269abe35bf279ebb65f67d162840f2a198e -======= version: b440c174990c33798231df2124aea3810a262acf ->>>>>>> ae620b058022eab459a09fd6305184915f02829f subpackages: - pkg/docker - pkg/validator @@ -459,7 +450,7 @@ imports: - googleapi/internal/uritemplates - storage/v1 - name: google.golang.org/appengine - version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 + version: 5bee14b453b4c71be47ec1781b0fa61c2ea182db subpackages: - internal - internal/app_identity @@ -481,7 +472,7 @@ imports: - name: gopkg.in/warnings.v0 version: 8a331561fe74dadba6edfc59f3be66c22c3b065d - name: gopkg.in/yaml.v2 - version: d670f9405373e636a5a2765eea47fac0c9bc91a4 + version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 - name: k8s.io/api version: af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a subpackages: diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 9afb5df19..294cbccf7 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -1,3 +1,17 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by go-bindata. 
// sources: // schemas/jsonschema-draft-04.json @@ -69,7 +83,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -84,12 +98,12 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
3\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x
97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\
x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") +var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xec\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x3
2\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\xea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x
71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\
x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -104,7 
+118,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -162,7 +176,7 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -204,6 +218,7 @@ type bintree struct { Func func() (*asset, error) Children map[string]*bintree } + var _bintree = &bintree{nil, map[string]*bintree{ "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, "v2": &bintree{nil, map[string]*bintree{ @@ -257,4 +272,3 @@ func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } - diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index b4429a21c..eb1490b05 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -17,10 +17,7 @@ package spec import ( "encoding/json" "fmt" - "log" "net/url" - "os" - "path/filepath" "reflect" "strings" "sync" @@ -29,18 +26,6 @@ import ( "github.com/go-openapi/swag" ) -var ( - // Debug enables logging when SWAGGER_DEBUG env var is not empty - Debug = os.Getenv("SWAGGER_DEBUG") != "" -) - -// ExpandOptions provides options for expand. 
-type ExpandOptions struct { - RelativeBase string - SkipSchemas bool - ContinueOnError bool -} - // ResolutionCache a cache for resolving urls type ResolutionCache interface { Get(string) (interface{}, bool) @@ -52,11 +37,7 @@ type simpleCache struct { store map[string]interface{} } -var resCache ResolutionCache - -func init() { - resCache = initResolutionCache() -} +var resCache = initResolutionCache() func initResolutionCache() ResolutionCache { return &simpleCache{store: map[string]interface{}{ @@ -66,11 +47,8 @@ func initResolutionCache() ResolutionCache { } func (s *simpleCache) Get(uri string) (interface{}, bool) { - debugLog("getting %q from resolution cache", uri) s.lock.Lock() v, ok := s.store[uri] - debugLog("got %q from resolution cache: %t", uri, ok) - s.lock.Unlock() return v, ok } @@ -81,9 +59,9 @@ func (s *simpleCache) Set(uri string, data interface{}) { s.lock.Unlock() } -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, nil, opts, nil) +// ResolveRef resolves a reference against a context root +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) if err != nil { return nil, err } @@ -95,19 +73,9 @@ func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schem return result, nil } -// ResolveRef resolves a reference against a context root -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - return ResolveRefWithBase(root, ref, nil) -} - // ResolveParameter resolves a paramter reference against a context root func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - return ResolveParameterWithBase(root, ref, nil) -} - -// ResolveParameterWithBase resolves a paramter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, nil, opts, nil) + resolver, err := defaultSchemaLoader(root, nil, nil) if err != nil { return nil, err } @@ -121,12 +89,7 @@ func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (* // ResolveResponse resolves response a reference against a context root func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - return ResolveResponseWithBase(root, ref, nil) -} - -// ResolveResponseWithBase resolves response a reference against a context root and base path -func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { - resolver, err := defaultSchemaLoader(root, nil, opts, nil) + resolver, err := defaultSchemaLoader(root, nil, nil) if err != nil { return nil, err } @@ -138,72 +101,23 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R return result, nil } -// ResolveItems resolves header and parameter items reference against a context root and base path -func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { - resolver, err := defaultSchemaLoader(root, nil, opts, nil) - if err != nil { - return nil, err - } - - result := new(Items) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - -// ResolvePathItem resolves response a path item against a context root and base path -func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, 
error) { - resolver, err := defaultSchemaLoader(root, nil, opts, nil) - if err != nil { - return nil, err - } - - result := new(PathItem) - if err := resolver.Resolve(&ref, result); err != nil { - return nil, err - } - return result, nil -} - type schemaLoader struct { loadingRef *Ref startingRef *Ref currentRef *Ref root interface{} - options *ExpandOptions cache ResolutionCache loadDoc func(string) (json.RawMessage, error) } var idPtr, _ = jsonpointer.New("/id") +var schemaPtr, _ = jsonpointer.New("/$schema") var refPtr, _ = jsonpointer.New("/$ref") -// PathLoader function to use when loading remote refs -var PathLoader func(string) (json.RawMessage, error) - -func init() { - PathLoader = func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - } -} - -func defaultSchemaLoader( - root interface{}, - ref *Ref, - expandOptions *ExpandOptions, - cache ResolutionCache) (*schemaLoader, error) { - +func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { if cache == nil { cache = resCache } - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } var ptr *jsonpointer.Pointer if ref != nil { @@ -213,16 +127,18 @@ func defaultSchemaLoader( currentRef := nextRef(root, ref, ptr) return &schemaLoader{ + root: root, loadingRef: ref, startingRef: ref, - currentRef: currentRef, - root: root, - options: expandOptions, cache: cache, loadDoc: func(path string) (json.RawMessage, error) { - debugLog("fetching document at %q", path) - return PathLoader(path) + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil }, + currentRef: currentRef, }, nil } @@ -243,7 +159,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe if startingRef == nil { return nil } - if ptr == nil { return startingRef } @@ -269,111 +184,32 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe refRef, _, _ := refPtr.Get(node) if refRef != nil { - var rf Ref - switch value := refRef.(type) { - case string: - rf, _ = NewRef(value) - } + rf, _ := NewRef(refRef.(string)) nw, err := ret.Inherits(rf) if err != nil { break } - nwURL := nw.GetURL() - if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { - nwpt := filepath.ToSlash(nwURL.Path) - if filepath.IsAbs(nwpt) { - _, err := os.Stat(nwpt) - if err != nil { - nwURL.Path = filepath.Join(".", nwpt) - } - } - } - ret = nw } } - return ret } -func debugLog(msg string, args ...interface{}) { - if Debug { - log.Printf(msg, args...) 
- } -} - -func normalizeFileRef(ref *Ref, relativeBase string) *Ref { - refURL := ref.GetURL() - debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String()) - if strings.HasPrefix(refURL.String(), "#") { - return ref - } - - if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") { - filePath := refURL.Path - debugLog("normalizing file path: %s", filePath) - - if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 { - debugLog("joining %s with %s", relativeBase, filePath) - if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil { - if !fi.IsDir() { - relativeBase = filepath.Dir(filepath.FromSlash(relativeBase)) - } - } - filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath)) - } - if !filepath.IsAbs(filepath.FromSlash(filePath)) { - pwd, err := os.Getwd() - if err == nil { - debugLog("joining cwd %s with %s", pwd, filePath) - filePath = filepath.Join(pwd, filepath.FromSlash(filePath)) - } - } - - debugLog("cleaning %s", filePath) - filePath = filepath.Clean(filepath.FromSlash(filePath)) - _, err := os.Stat(filepath.FromSlash(filePath)) - if err == nil { - debugLog("rewriting url %s to scheme \"\" path %s", refURL.String(), filePath) - slp := filepath.FromSlash(filePath) - if filepath.IsAbs(slp) && filepath.Separator == '\\' && len(slp) > 1 && slp[1] == ':' && ('a' <= slp[0] && slp[0] <= 'z' || 'A' <= slp[0] && slp[0] <= 'Z') { - slp = slp[2:] - } - refURL.Scheme = "" - refURL.Path = filepath.ToSlash(slp) - debugLog("new url with joined filepath: %s", refURL.String()) - *ref = MustCreateRef(refURL.String()) - } - } - - debugLog("refurl: %s", ref.GetURL().String()) - return ref -} - func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { - tgt := reflect.ValueOf(target) if tgt.Kind() != reflect.Ptr { return fmt.Errorf("resolve ref: target needs to be a pointer") } oldRef := currentRef - if currentRef != nil { - debugLog("resolve ref current %s new %s", currentRef.String(), ref.String()) - nextRef := nextRef(node, ref, currentRef.GetPointer()) - if nextRef == nil || nextRef.GetURL() == nil { - return nil - } var err error - currentRef, err = currentRef.Inherits(*nextRef) - debugLog("resolved ref current %s", currentRef.String()) + currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) if err != nil { return err } } - if currentRef == nil { currentRef = ref } @@ -409,71 +245,42 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{} return nil } - relativeBase := "" - if r.options != nil && r.options.RelativeBase != "" { - relativeBase = r.options.RelativeBase - } - normalizeFileRef(currentRef, relativeBase) - debugLog("current ref normalized file: %s", currentRef.String()) - normalizeFileRef(ref, relativeBase) - debugLog("ref normalized file: %s", currentRef.String()) - - data, _, _, err := r.load(currentRef.GetURL()) - if err != nil { - return err - } + if refURL.Scheme != "" && refURL.Host != "" { + // most definitely take the red pill + data, _, _, err := r.load(refURL) + if err != nil { + return err + } - if ((oldRef == nil && currentRef != nil) || - (oldRef != nil && currentRef == nil) || - oldRef.String() != currentRef.String()) && - ((oldRef == nil && ref != nil) || - (oldRef != nil && ref == nil) || - (oldRef.String() != ref.String())) { + if ((oldRef == nil && currentRef != nil) || + (oldRef != nil && currentRef == nil) || + oldRef.String() != currentRef.String()) && + ((oldRef == nil 
&& ref != nil) || + (oldRef != nil && ref == nil) || + (oldRef.String() != ref.String())) { - return r.resolveRef(currentRef, ref, data, target) - } - - var res interface{} - if currentRef.String() != "" { - res, _, err = currentRef.GetPointer().Get(data) - if err != nil { - if strings.HasPrefix(ref.String(), "#") { - if r.loadingRef != nil { - rr, er := r.loadingRef.Inherits(*ref) - if er != nil { - return er - } - refURL = rr.GetURL() - - data, _, _, err = r.load(refURL) - if err != nil { - return err - } - } else { - data = r.root - } - } + return r.resolveRef(currentRef, ref, data, target) + } - res, _, err = ref.GetPointer().Get(data) + var res interface{} + if currentRef.String() != "" { + res, _, err = currentRef.GetPointer().Get(data) if err != nil { return err } + } else { + res = data } - } else { - res = data - } - - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err - } - r.currentRef = currentRef + if err := swag.DynamicJSONToStruct(res, target); err != nil { + return err + } + } return nil } func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) toFetch := *refURL toFetch.Fragment = "" @@ -492,51 +299,44 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) return data, toFetch, fromCache, nil } - func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { - return r.resolveRef(r.currentRef, ref, r.root, target) -} - -func (r *schemaLoader) reset() { - ref := r.startingRef - - var ptr *jsonpointer.Pointer - if ref != nil { - ptr = ref.GetPointer() + if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { + return err } - r.currentRef = nextRef(r.root, ref, ptr) + return nil +} + +type specExpander struct { + spec *Swagger + resolver *schemaLoader } // ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - resolver, err := defaultSchemaLoader(spec, nil, options, nil) - // Just in case this ever returns an error. 
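// Illustrative sketch, assuming a minimal inline Swagger document: with
// go-openapi/spec pinned at the revision vendored in this patch, ExpandSpec
// and ResolveRef take no *ExpandOptions argument and stop on the first error.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
	  "swagger": "2.0",
	  "info": {"title": "demo", "version": "1.0"},
	  "paths": {},
	  "definitions": {
	    "Pet": {"type": "object", "properties": {"name": {"type": "string"}}}
	  }
	}`)

	doc := new(spec.Swagger)
	if err := json.Unmarshal(raw, doc); err != nil {
		log.Fatal(err)
	}

	// Expand every $ref in place; at this revision there is no SkipSchemas or
	// ContinueOnError knob, so the first failure aborts the whole expansion.
	if err := spec.ExpandSpec(doc); err != nil {
		log.Fatal(err)
	}

	// Resolve a single local reference against the document root.
	ref := spec.MustCreateRef("#/definitions/Pet")
	schema, err := spec.ResolveRef(doc, &ref)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(schema.Properties["name"].Type)
}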
- if shouldStopOnError(err, resolver.options) { +func ExpandSpec(spec *Swagger) error { + resolver, err := defaultSchemaLoader(spec, nil, nil) + if err != nil { return err } - if options == nil || !options.SkipSchemas { - for key, definition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) { - return err - } - resolver.reset() - spec.Definitions[key] = *def + for key, defintition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { + return err } + spec.Definitions[key] = *def } for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver); shouldStopOnError(err, resolver.options) { + if err := expandParameter(¶meter, resolver); err != nil { return err } spec.Parameters[key] = parameter } for key, response := range spec.Responses { - if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { + if err := expandResponse(&response, resolver); err != nil { return err } spec.Responses[key] = response @@ -544,7 +344,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { if spec.Paths != nil { for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) { + if err := expandPathItem(&path, resolver); err != nil { return err } spec.Paths.Paths[key] = path @@ -554,25 +354,9 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { return nil } -func shouldStopOnError(err error, opts *ExpandOptions) bool { - if err != nil && !opts.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - // ExpandSchema expands the refs in the schema object func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - return ExpandSchemaWithBasePath(schema, root, cache, nil) -} -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options -func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error { if schema == nil { return nil } @@ -583,17 +367,18 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution nrr, _ := NewRef(schema.ID) var rrr *Ref if nrr.String() != "" { - switch rt := root.(type) { + switch root.(type) { case *Schema: - rid, _ := NewRef(rt.ID) + rid, _ := NewRef(root.(*Schema).ID) rrr, _ = rid.Inherits(nrr) case *Swagger: - rid, _ := NewRef(rt.ID) + rid, _ := NewRef(root.(*Swagger).ID) rrr, _ = rid.Inherits(nrr) } + } - resolver, err := defaultSchemaLoader(root, rrr, opts, cache) + resolver, err := defaultSchemaLoader(root, rrr, cache) if err != nil { return err } @@ -604,7 +389,7 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution } var s *Schema if s, err = expandSchema(*schema, refs, resolver); err != nil { - return err + return nil } *schema = *s return nil @@ -615,15 +400,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S if target.Items.Schema != nil { t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) if err != nil { - if target.Items.Schema.ID == "" { - target.Items.Schema.ID = target.ID - if err != nil { - t, err = expandSchema(*target.Items.Schema, parentRefs, resolver) - if err != nil { - return nil, err - } - } - } + return 
nil, err } *target.Items.Schema = *t } @@ -638,173 +415,137 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S return &target, nil } -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { + defer func() { + schema = &target + }() if target.Ref.String() == "" && target.Ref.IsRoot() { - debugLog("skipping expand schema for no ref and root: %v", resolver.root) - - return resolver.root.(*Schema), nil + target = *resolver.root.(*Schema) + return } // t is the new expanded schema var t *Schema - for target.Ref.String() != "" { - if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { - return &target, nil + // var newTarget Schema + pRefs := strings.Join(parentRefs, ",") + pRefs += "," + if strings.Contains(pRefs, target.Ref.String()+",") { + err = nil + return } - if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) { - return &target, err - } - - if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { - debugLog("ref already exists in parent") - return &target, nil + if err = resolver.Resolve(&target.Ref, &t); err != nil { + return } parentRefs = append(parentRefs, target.Ref.String()) - if t != nil { - target = *t - } + target = *t } - t, err := expandItems(target, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target = *t + if t, err = expandItems(target, parentRefs, resolver); err != nil { + return } + target = *t for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target.AllOf[i] = *t + if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { + return } + target.AllOf[i] = *t } for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err + if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { + return } target.AnyOf[i] = *t } for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t + if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { + return } + target.OneOf[i] = *t } if target.Not != nil { - t, err := expandSchema(*target.Not, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - *target.Not = *t + if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { + return } + *target.Not = *t } - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target.Properties[k] = *t + for k, _ := range target.Properties { + if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { + return } + target.Properties[k] = *t } if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t + if t, 
err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { + return } + *target.AdditionalProperties.Schema = *t } - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t + for k, _ := range target.PatternProperties { + if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { + return } + target.PatternProperties[k] = *t } - for k := range target.Dependencies { + for k, _ := range target.Dependencies { if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t + if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { + return } + *target.Dependencies[k].Schema = *t } } if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t + if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { + return } + *target.AdditionalItems.Schema = *t } - for k := range target.Definitions { - t, err := expandSchema(target.Definitions[k], parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { - return &target, err - } - if t != nil { - target.Definitions[k] = *t + for k, _ := range target.Definitions { + if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { + return } + target.Definitions[k] = *t } - return &target, nil + return } func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { if pathItem == nil { return nil } - if pathItem.Ref.String() != "" { if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { return err } - resolver.reset() - pathItem.Ref = Ref{} } for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) { + if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { return err } } - if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Get, resolver); err != nil { return err } - if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Head, resolver); err != nil { return err } - if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Options, resolver); err != nil { return err } - if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Put, resolver); err != nil { return err } - if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Post, resolver); err != nil { return err } - if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Patch, resolver); err != nil { return err } - if err := expandOperation(pathItem.Delete, resolver); 
shouldStopOnError(err, resolver.options) { + if err := expandOperation(pathItem.Delete, resolver); err != nil { return err } return nil @@ -814,9 +555,8 @@ func expandOperation(op *Operation, resolver *schemaLoader) error { if op == nil { return nil } - for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver); shouldStopOnError(err, resolver.options) { + if err := expandParameter(¶m, resolver); err != nil { return err } op.Parameters[i] = param @@ -824,11 +564,11 @@ func expandOperation(op *Operation, resolver *schemaLoader) error { if op.Responses != nil { responses := op.Responses - if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) { + if err := expandResponse(responses.Default, resolver); err != nil { return err } for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { + if err := expandResponse(&response, resolver); err != nil { return err } responses.StatusCodeResponses[code] = response @@ -842,29 +582,22 @@ func expandResponse(response *Response, resolver *schemaLoader) error { return nil } - var parentRefs []string - if response.Ref.String() != "" { - parentRefs = append(parentRefs, response.Ref.String()) - if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) { + if err := resolver.Resolve(&response.Ref, response); err != nil { return err } - resolver.reset() - response.Ref = Ref{} } - if !resolver.options.SkipSchemas && response.Schema != nil { - parentRefs = append(parentRefs, response.Schema.Ref.String()) - debugLog("response ref: %s", response.Schema.Ref) - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) { + if response.Schema != nil { + parentRefs := []string{response.Schema.Ref.String()} + if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { return err } - s, err := expandSchema(*response.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { + if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { return err + } else { + *response.Schema = *s } - resolver.reset() - *response.Schema = *s } return nil } @@ -873,28 +606,21 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error { if parameter == nil { return nil } - - var parentRefs []string - if parameter.Ref.String() != "" { - parentRefs = append(parentRefs, parameter.Ref.String()) - if err := resolver.Resolve(¶meter.Ref, parameter); shouldStopOnError(err, resolver.options) { + if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { return err } - resolver.reset() - parameter.Ref = Ref{} } - if !resolver.options.SkipSchemas && parameter.Schema != nil { - parentRefs = append(parentRefs, parameter.Schema.Ref.String()) - if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); shouldStopOnError(err, resolver.options) { + if parameter.Schema != nil { + parentRefs := []string{parameter.Schema.Ref.String()} + if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { return err } - s, err := expandSchema(*parameter.Schema, parentRefs, resolver) - if shouldStopOnError(err, resolver.options) { + if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { return err + } else { + *parameter.Schema = *s } - resolver.reset() - *parameter.Schema = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/header.go 
b/vendor/github.com/go-openapi/spec/header.go index 85c4d454c..758b84531 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ b/vendor/github.com/go-openapi/spec/header.go @@ -16,9 +16,7 @@ package spec import ( "encoding/json" - "strings" - "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -32,7 +30,6 @@ type HeaderProps struct { type Header struct { CommonValidations SimpleSchema - VendorExtensible HeaderProps } @@ -161,35 +158,8 @@ func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { return err } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } if err := json.Unmarshal(data, &h.HeaderProps); err != nil { return err } return nil } - -// JSONLookup look up a value by the json property name -func (p Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.HeaderProps, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index 46944fb69..4d57ea5ca 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -16,9 +16,7 @@ package spec import ( "encoding/json" - "strings" - "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -62,12 +60,11 @@ type CommonValidations struct { // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". 
// -// For more information: http://goo.gl/8us55a#items-object +// For more information: http://goo.gl/8us55a#items-object- type Items struct { Refable CommonValidations SimpleSchema - VendorExtensible } // NewItems creates a new instance of items @@ -200,20 +197,3 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2), nil } - -// JSONLookup look up a value by the json property name -func (p Items) JSONLookup(token string) (interface{}, error) { - if token == "$ref" { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 71aee1e80..8fb66d12a 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -16,7 +16,6 @@ package spec import ( "encoding/json" - "strings" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" @@ -101,16 +100,15 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if token == "$ref" { return &p.Ref, nil } - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + if err != nil { return nil, err } if r != nil { return r, nil } r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + if err != nil { return nil, err } if r != nil { diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 4833b87e2..68631df8b 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -55,7 +55,7 @@ func (r *Ref) RemoteURI() string { } // IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { +func (r *Ref) IsValidURI() bool { if r.String() == "" { return true } @@ -81,18 +81,14 @@ func (r *Ref) IsValidURI(basepaths ...string) bool { // check for local file pth := v if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + p, e := filepath.Abs(pth) if e != nil { return false } pth = p } - fi, err := os.Stat(filepath.ToSlash(pth)) + fi, err := os.Stat(pth) if err != nil { return false } @@ -120,18 +116,25 @@ func NewRef(refURI string) (Ref, error) { return Ref{Ref: ref}, nil } -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
+// MustCreateRef creates a ref object but func MustCreateRef(refURI string) Ref { return Ref{Ref: jsonreference.MustCreateRef(refURI)} } +// // NewResolvedRef creates a resolved ref +// func NewResolvedRef(refURI string, data interface{}) Ref { +// return Ref{ +// Ref: jsonreference.MustCreateRef(refURI), +// Resolved: data, +// } +// } + // MarshalJSON marshals this ref into a JSON object func (r Ref) MarshalJSON() ([]byte, error) { str := r.String() if str == "" { if r.IsRoot() { - return []byte(`{"$ref":""}`), nil + return []byte(`{"$ref":"#"}`), nil } return []byte("{}"), nil } diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index a32b039ea..308cc8478 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -17,7 +17,6 @@ package spec import ( "encoding/json" - "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -35,19 +34,6 @@ type ResponseProps struct { type Response struct { Refable ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (p Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.ResponseProps, token) - return r, err } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -58,9 +44,6 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } return nil } @@ -74,11 +57,7 @@ func (r Response) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil + return swag.ConcatJSON(b1, b2), nil } // NewResponse creates a new response instance diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 3ab06697f..ea071ca63 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -51,7 +51,7 @@ func (r Responses) JSONLookup(token string) (interface{}, error) { } if i, err := strconv.Atoi(token); err == nil { if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil + return &scr, nil } } return nil, fmt.Errorf("object has no field %q", token) diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index 1cdcc163f..eb88f005c 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -201,8 +201,8 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error { type SchemaProps struct { ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` + Ref Ref `json:"-,omitempty"` + Schema SchemaURL `json:"-,omitempty"` Description string `json:"description,omitempty"` Type StringOrArray `json:"type,omitempty"` Format string `json:"format,omitempty"` @@ -269,7 +269,7 @@ func (s Schema) JSONLookup(token string) (interface{}, error) { } r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + if r != nil || err != nil { return r, err } r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) diff --git 
a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 0bb045bc0..cc2ae56b2 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -16,8 +16,6 @@ package spec import "encoding/json" -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema //go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... //go:generate perl -pi -e s,Json,JSON,g bindata.go @@ -29,14 +27,9 @@ const ( ) var ( - jsonSchema *Schema - swaggerSchema *Schema -) - -func init() { - jsonSchema = MustLoadJSONSchemaDraft04() + jsonSchema = MustLoadJSONSchemaDraft04() swaggerSchema = MustLoadSwagger20Schema() -} +) // MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error func MustLoadJSONSchemaDraft04() *Schema { diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 23780c78a..ff3ef875e 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -77,7 +77,7 @@ type SwaggerProps struct { Host string `json:"host,omitempty"` BasePath string `json:"basePath,omitempty"` // must start with a leading "/" Paths *Paths `json:"paths"` // required - Definitions Definitions `json:"definitions,omitempty"` + Definitions Definitions `json:"definitions"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` @@ -156,7 +156,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) } - return []byte("null"), nil + return nil, nil } // UnmarshalJSON converts this schema object or array from a JSON structure diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 475cf2e32..76dedc81d 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -28,7 +28,8 @@ import ( // See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests // for details on how to do your own health checking. // -// Main is not yet supported on App Engine Standard. +// On App Engine Standard it ensures the server has started and is prepared to +// receive requests. // // Main never returns. // @@ -62,7 +63,7 @@ func IsDevAppServer() bool { // NewContext returns a context for an in-flight HTTP request. // This function is cheap. func NewContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) + return internal.ReqContext(req) } // WithContext returns a copy of the parent context diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index ec5aa59b3..16f87c5d3 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. 
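// Hedged usage sketch, assuming a plain HTTP handler: with the appengine
// revision vendored above, NewContext derives the context from the in-flight
// request (via internal.ReqContext), so a handler obtains it like this.
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func handle(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r) // cheap: backed by the request's own context
	log.Infof(ctx, "serving %s", r.URL.Path)
	fmt.Fprintln(w, "ok")
}

func main() {
	http.HandleFunc("/", handle)
	appengine.Main() // starts the server and never returns
}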
// +build !appengine +// +build go1.7 package internal @@ -32,7 +33,8 @@ import ( ) const ( - apiPath = "/rpc_http" + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" ) var ( @@ -60,6 +62,11 @@ var ( Dial: limitDial, }, } + + defaultTicketOnce sync.Once + defaultTicket string + backgroundContextOnce sync.Once + backgroundContext netcontext.Context ) func apiURL() *url.URL { @@ -83,16 +90,10 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { outHeader: w.Header(), apiURL: apiURL(), } - stopFlushing := make(chan int) + r = r.WithContext(withContext(r.Context(), c)) + c.req = r - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() + stopFlushing := make(chan int) // Patch up RemoteAddr so it looks reasonable. if addr := r.Header.Get(userIPHeader); addr != "" { @@ -191,18 +192,6 @@ func renderPanic(x interface{}) string { return string(buf) } -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - // context represents the context of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. type context struct { @@ -223,6 +212,34 @@ type context struct { var contextKey = "holds a *context" +// jointContext joins two contexts in a superficial way. +// It takes values and timeouts from a base context, and only values from another context. +type jointContext struct { + base netcontext.Context + valuesOnly netcontext.Context +} + +func (c jointContext) Deadline() (time.Time, bool) { + return c.base.Deadline() +} + +func (c jointContext) Done() <-chan struct{} { + return c.base.Done() +} + +func (c jointContext) Err() error { + return c.base.Err() +} + +func (c jointContext) Value(key interface{}) interface{} { + if val := c.base.Value(key); val != nil { + return val + } + return c.valuesOnly.Value(key) +} + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. func fromContext(ctx netcontext.Context) *context { c, _ := ctx.Value(&contextKey).(*context) return c @@ -247,86 +264,70 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() +func ReqContext(req *http.Request) netcontext.Context { + return req.Context() +} - if d != nil { - parent = d(parent) +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + return jointContext{ + base: parent, + valuesOnly: req.Context(), } +} - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) +// DefaultTicket returns a ticket used for background context or dev_appserver. 
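// Standalone sketch of the jointContext pattern shown above (assumed names,
// written against the standard library context package instead of the
// vendored netcontext alias): deadlines, cancellation and Err come from the
// base context, while Value lookups fall back to a second, values-only one.
package main

import (
	"context"
	"fmt"
	"time"
)

type joined struct {
	base       context.Context // supplies Deadline, Done and Err
	valuesOnly context.Context // consulted only by Value
}

func (c joined) Deadline() (time.Time, bool) { return c.base.Deadline() }
func (c joined) Done() <-chan struct{}       { return c.base.Done() }
func (c joined) Err() error                  { return c.base.Err() }

func (c joined) Value(key interface{}) interface{} {
	if v := c.base.Value(key); v != nil {
		return v
	}
	return c.valuesOnly.Value(key)
}

func main() {
	type ctxKey string
	base, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	vals := context.WithValue(context.Background(), ctxKey("user"), "alice")

	var ctx context.Context = joined{base: base, valuesOnly: vals}
	fmt.Println(ctx.Value(ctxKey("user"))) // "alice": value comes from the values-only side
	fmt.Println(ctx.Err())                 // nil until the base context's timeout fires
}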
+func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket } func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, + backgroundContextOnce.Do(func() { + // Compute background security ticket. + ticket := DefaultTicket() + + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, }, - }, - apiURL: apiURL(), - } + apiURL: apiURL(), + } + backgroundContext = toContext(c) - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go c.logFlusher(make(chan int)) + }) - return toContext(ctxs.bg) + return backgroundContext } // RegisterTestRequest registers the HTTP request req for testing, such that // any API calls are sent to the provided URL. It returns a closure to delete // the registration. // It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { c := &context{ req: req, apiURL: apiURL, } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } + ctx := withContext(decorate(req.Context()), c) + req = req.WithContext(ctx) + c.req = req + return req, func() {} } var errTimeout = &CallError{ @@ -452,7 +453,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) c := fromContext(ctx) if c == nil { // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") + return errNotAppEngineContext } // Apply transaction modifications if we're in a transaction. @@ -475,6 +476,16 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. 
+ if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. + if ticket == "" { + ticket = DefaultTicket() + } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -550,6 +561,9 @@ var logLevelName = map[int64]string{ } func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. c.addLogLine(&logpb.UserAppLogLine{ diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index 597f66e6e..f0f40b2e3 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -22,14 +22,20 @@ import ( var contextKey = "holds an appengine.Context" +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. func fromContext(ctx netcontext.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { - return fromContext(ctx) +func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { + c := fromContext(ctx) + if c == nil { + return nil, errNotAppEngineContext + } + return c, nil } func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { @@ -53,6 +59,10 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } +func ReqContext(req *http.Request) netcontext.Context { + return WithContext(netcontext.Background(), req) +} + func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { c := appengine.NewContext(req) return withContext(parent, c) @@ -98,7 +108,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) c := fromContext(ctx) if c == nil { // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") + return errNotAppEngineContext } // Apply transaction modifications if we're in a transaction. diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index 2db33a774..e0c0b214b 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,10 +5,15 @@ package internal import ( + "errors" + "os" + "github.com/golang/protobuf/proto" netcontext "golang.org/x/net/context" ) +var errNotAppEngineContext = errors.New("not an App Engine context") + type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" @@ -77,10 +82,42 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ f(level, format, args...) return } - logf(fromContext(ctx), level, format, args...) + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + logf(c, level, format, args...) } // NamespacedContext wraps a Context to support namespaces. 
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { return withNamespace(ctx, namespace) } + +// SetTestEnv sets the env variables for testing background ticket in Flex. +func SetTestEnv() func() { + var environ = []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + return func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go new file mode 100644 index 000000000..028b4f056 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_pre17.go @@ -0,0 +1,682 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine +// +build !go1.7 + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. 
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } + + defaultTicketOnce sync.Once + defaultTicket string +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + stopFlushing := make(chan int) + + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go c.flushLog(false) + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. 
+ const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return WithContext(netcontext.Background(), req) +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) + } + + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) +} + +// DefaultTicket returns a ticket used for background context or dev_appserver. 
+func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket +} + +func BackgroundContext() netcontext.Context { + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + ticket := DefaultTicket() + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) + + return toContext(ctxs.bg) +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { + c := &context{ + req: req, + apiURL: apiURL, + } + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return req, func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) 
+ return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. + if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. + if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
+ if ticket == "" { + ticket = DefaultTicket() + } + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. + const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + log.Print(logLevelName[level] + ": " + s) +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
+ c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index e6b9227c5..b59603f13 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -13,15 +13,45 @@ import ( ) func DefaultVersionHostname(ctx netcontext.Context) string { - return appengine.DefaultVersionHostname(fromContext(ctx)) + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.DefaultVersionHostname(c) } -func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } -func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } +func RequestID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.RequestID(c) +} + +func ModuleName(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.ModuleName(c) +} +func VersionID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.VersionID(c) +} + +func fullyQualifiedAppID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return c.FullyQualifiedAppID() +} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index ebe68b785..d5fa75be7 100644 --- 
a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -23,7 +23,11 @@ const ( ) func ctxHeaders(ctx netcontext.Context) http.Header { - return fromContext(ctx).Request().Header + c := fromContext(ctx) + if c == nil { + return nil + } + return c.Request().Header } func DefaultVersionHostname(ctx netcontext.Context) string { diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index 57331ad17..822e784a4 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -22,7 +22,11 @@ func Main() { port = s } - if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { + host := "" + if IsDevAppServer() { + host = "127.0.0.1" + } + if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE index 8dada3eda..a68e67f01 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -1,201 +1,188 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. 
If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. 
+ + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e85eb2e3f..085cddc44 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -120,6 +120,7 @@ func (p *parser) parse() *node { default: panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) } + panic("unreachable") } func (p *parser) node(kind int) *node { @@ -190,7 +191,6 @@ type decoder struct { aliases map[string]bool mapType reflect.Type terrors []string - strict bool } var ( @@ -200,8 +200,8 @@ var ( ifaceType = defaultMapType.Elem() ) -func newDecoder(strict bool) *decoder { - d := &decoder{mapType: defaultMapType, strict: strict} +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} d.aliases = make(map[string]bool) return d } @@ -251,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { // // If n holds a null value, prepare returns before doing anything. func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { return out, false, false } again := true @@ -640,8 +640,6 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { value := reflect.New(elemType).Elem() d.unmarshal(n.children[i+1], value) inlineMap.SetMapIndex(name, value) - } else if d.strict { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", ni.line+1, name.String(), out.Type())) } } return true diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go index dcaf502f0..2befd553e 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -666,6 +666,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, return yaml_emitter_set_emitter_error(emitter, "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") } + return false } // Expect ALIAS. 
@@ -994,10 +995,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { break_space = false space_break = false - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false ) emitter.scalar_data.value = value @@ -1016,7 +1017,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { flow_indicators = true } - preceded_by_whitespace = true + preceeded_by_whitespace = true for i, w := 0, 0; i < len(value); i += w { w = width(value[i]) followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) @@ -1047,7 +1048,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { block_indicators = true } case '#': - if preceded_by_whitespace { + if preceeded_by_whitespace { flow_indicators = true block_indicators = true } @@ -1088,7 +1089,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { } // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) + preceeded_by_whitespace = is_blankz(value, i) } emitter.scalar_data.multiline = line_breaks diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go index 81d05dfe5..0a7037ad1 100644 --- a/vendor/gopkg.in/yaml.v2/parserc.go +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -166,6 +166,7 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool default: panic("invalid parser state") } + return false } // Parse the production: diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index f45079171..d5fb09727 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -247,7 +247,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { if parser.encoding == yaml_UTF16LE_ENCODING { low, high = 0, 1 } else { - low, high = 1, 0 + high, low = 1, 0 } // The UTF-16 encoding is not as simple as one might @@ -357,26 +357,23 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { if value <= 0x7F { // 0000 0000-0000 007F . 0xxxxxxx parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 } else if value <= 0x7FF { // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 } else if value <= 0xFFFF { // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 } else { // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 } + buffer_len += width parser.unread++ } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 232313cc0..93a863274 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -3,7 +3,6 @@ package yaml import ( "encoding/base64" "math" - "regexp" "strconv" "strings" "unicode/utf8" @@ -81,8 +80,6 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) - func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { return tag, in @@ -138,11 +135,9 @@ func resolve(tag string, in string) (rtag string, out interface{}) { if err == nil { return yaml_INT_TAG, uintv } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return yaml_FLOAT_TAG, floatv - } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv } if strings.HasPrefix(plain, "0b") { intv, err := strconv.ParseInt(plain[2:], 2, 64) diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 074484455..d97d76fa5 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -9,7 +9,7 @@ import ( // ************ // // The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in // some cases we are less restrictive that it requires. // // The process of transforming a YAML stream into a sequence of events is @@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co if directive { context = "while parsing a %TAG directive" } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") } func trace(args ...interface{}) func() { @@ -1546,7 +1546,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool // Unknown directive. } else { yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") + start_mark, "found uknown directive name") return false } @@ -1944,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma } else { // It's either the '!' tag or not really a tag handle. If it's a %TAG // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { + if directive && !(s[0] == '!' && s[1] == 0) { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected '!'") return false @@ -1959,7 +1959,6 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { //size_t length = head ? strlen((char *)head) : 0 var s []byte - hasTag := len(head) > 0 // Copy the head if needed. 
// @@ -2001,10 +2000,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { return false } - hasTag = true } - if !hasTag { + // Check if the tag is non-empty. + if len(s) == 0 { yaml_parser_set_scanner_tag_error(parser, directive, start_mark, "did not find expected tag URI") return false diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 5e3c2daee..d133edf9d 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -77,19 +77,8 @@ type Marshaler interface { // supported tag options. // func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// UnmarshalStrict is like Unmarshal except that any fields that are found -// in the data that do not have corresponding struct members will result in -// an error. -func UnmarshalStrict(in []byte, out interface{}) (err error) { - return unmarshal(in, out, true) -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { defer handleErr(&err) - d := newDecoder(strict) + d := newDecoder() p := newParser(in) defer p.destroy() node := p.parse() @@ -140,7 +129,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // For example: // // type T struct { -// F int `yaml:"a,omitempty"` +// F int "a,omitempty" // B int // } // yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" @@ -233,7 +222,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { + if field.PkgPath != "" { continue // Private field } diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index 3caeca049..d60a6b6b0 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -508,7 +508,7 @@ type yaml_parser_t struct { problem string // Error description. - // The byte about which the problem occurred. + // The byte about which the problem occured. 
problem_offset int problem_value int problem_mark yaml_mark_t From f60bd9f54d1d110f145aa751daf3e8ac1560f70c Mon Sep 17 00:00:00 2001 From: tamal Date: Mon, 29 Jan 2018 22:08:44 -0800 Subject: [PATCH 6/6] Revendor --- glide.lock | 12 +- vendor/github.com/go-openapi/spec/bindata.go | 26 +- vendor/github.com/go-openapi/spec/expander.go | 562 +++++++++++---- vendor/github.com/go-openapi/spec/header.go | 30 + vendor/github.com/go-openapi/spec/items.go | 22 +- .../github.com/go-openapi/spec/parameter.go | 6 +- vendor/github.com/go-openapi/spec/ref.go | 23 +- vendor/github.com/go-openapi/spec/response.go | 23 +- .../github.com/go-openapi/spec/responses.go | 2 +- vendor/github.com/go-openapi/spec/schema.go | 6 +- vendor/github.com/go-openapi/spec/spec.go | 11 +- vendor/github.com/go-openapi/spec/swagger.go | 4 +- .../google.golang.org/appengine/appengine.go | 5 +- .../appengine/internal/api.go | 182 +++-- .../appengine/internal/api_classic.go | 16 +- .../appengine/internal/api_common.go | 39 +- .../appengine/internal/api_pre17.go | 682 ------------------ .../appengine/internal/identity_classic.go | 48 +- .../appengine/internal/identity_vm.go | 6 +- .../appengine/internal/main_vm.go | 6 +- vendor/gopkg.in/yaml.v2/LICENSE | 389 +++++----- vendor/gopkg.in/yaml.v2/decode.go | 10 +- vendor/gopkg.in/yaml.v2/emitterc.go | 15 +- vendor/gopkg.in/yaml.v2/parserc.go | 1 - vendor/gopkg.in/yaml.v2/readerc.go | 7 +- vendor/gopkg.in/yaml.v2/resolve.go | 11 +- vendor/gopkg.in/yaml.v2/scannerc.go | 13 +- vendor/gopkg.in/yaml.v2/yaml.go | 17 +- vendor/gopkg.in/yaml.v2/yamlh.go | 2 +- 29 files changed, 882 insertions(+), 1294 deletions(-) delete mode 100644 vendor/google.golang.org/appengine/internal/api_pre17.go diff --git a/glide.lock b/glide.lock index 9f99692b1..56e520633 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 524d47ba2b9ae54115ade7c896a4fb9ca17c77d3db633fe10daf87a9c35f8df7 -updated: 2018-01-30T11:57:41.570285242+06:00 +updated: 2018-01-29T22:07:06.837426641-08:00 imports: - name: cloud.google.com/go version: fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f @@ -7,7 +7,7 @@ imports: - compute/metadata - internal - name: github.com/appscode/go - version: aee9dd25c6354457ed532ae0d463e54e53151ad8 + version: 99dbf420807d9459b63d1d0bf702537bb7aa976f subpackages: - analytics - context @@ -183,7 +183,7 @@ imports: - name: github.com/go-openapi/jsonreference version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 - name: github.com/go-openapi/spec - version: 6aced65f8501fe1217321abf0749d354824ba2ff + version: 7abd5745472fff5eb3685386d5fb8bf38683154d - name: github.com/go-openapi/swag version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/gogo/protobuf @@ -258,7 +258,7 @@ imports: subpackages: - diskcache - name: github.com/hashicorp/go-version - version: 40efc0a21ca93e3bfbddefd4b5309a3e79d0aeb7 + version: 53932f80ddea12bea96be074f9fb2dc545806aba repo: https://github.com/appscode/go-version.git vcs: git - name: github.com/hashicorp/golang-lru @@ -450,7 +450,7 @@ imports: - googleapi/internal/uritemplates - storage/v1 - name: google.golang.org/appengine - version: 5bee14b453b4c71be47ec1781b0fa61c2ea182db + version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 subpackages: - internal - internal/app_identity @@ -472,7 +472,7 @@ imports: - name: gopkg.in/warnings.v0 version: 8a331561fe74dadba6edfc59f3be66c22c3b065d - name: gopkg.in/yaml.v2 - version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 + version: d670f9405373e636a5a2765eea47fac0c9bc91a4 - name: k8s.io/api version: 
af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a subpackages: diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 294cbccf7..9afb5df19 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -1,17 +1,3 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Code generated by go-bindata. // sources: // schemas/jsonschema-draft-04.json @@ -83,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\x72\x55\x7d\x93\x0d\
xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -98,12 +84,12 @@ func 
jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xec\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x
40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x32\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\xea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\
x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9
\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6
b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x
64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\
x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8
\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x3
4\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -118,7 +104,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -176,7 +162,7 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ "jsonschema-draft-04.json": jsonschemaDraft04JSON, - "v2/schema.json": v2SchemaJSON, + "v2/schema.json": v2SchemaJSON, } // AssetDir returns the file names below a certain @@ -218,7 +204,6 @@ type bintree struct { Func func() (*asset, error) Children map[string]*bintree } - var _bintree = &bintree{nil, map[string]*bintree{ "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, "v2": &bintree{nil, map[string]*bintree{ @@ -272,3 +257,4 @@ func _filePath(dir, name string) string { cannonicalName := strings.Replace(name, "\\", "/", -1) return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) } + diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index eb1490b05..b4429a21c 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -17,7 +17,10 @@ package spec import ( "encoding/json" "fmt" + "log" "net/url" + "os" + "path/filepath" "reflect" "strings" "sync" @@ -26,6 +29,18 @@ import ( "github.com/go-openapi/swag" ) +var ( + // Debug enables logging when SWAGGER_DEBUG env var is not empty + Debug = os.Getenv("SWAGGER_DEBUG") != "" +) + +// ExpandOptions provides options for expand. 
+type ExpandOptions struct { + RelativeBase string + SkipSchemas bool + ContinueOnError bool +} + // ResolutionCache a cache for resolving urls type ResolutionCache interface { Get(string) (interface{}, bool) @@ -37,7 +52,11 @@ type simpleCache struct { store map[string]interface{} } -var resCache = initResolutionCache() +var resCache ResolutionCache + +func init() { + resCache = initResolutionCache() +} func initResolutionCache() ResolutionCache { return &simpleCache{store: map[string]interface{}{ @@ -47,8 +66,11 @@ func initResolutionCache() ResolutionCache { } func (s *simpleCache) Get(uri string) (interface{}, bool) { + debugLog("getting %q from resolution cache", uri) s.lock.Lock() v, ok := s.store[uri] + debugLog("got %q from resolution cache: %t", uri, ok) + s.lock.Unlock() return v, ok } @@ -59,9 +81,9 @@ func (s *simpleCache) Set(uri string, data interface{}) { s.lock.Unlock() } -// ResolveRef resolves a reference against a context root -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) +// ResolveRefWithBase resolves a reference against a context root with preservation of base path +func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -73,9 +95,19 @@ func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { return result, nil } +// ResolveRef resolves a reference against a context root +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + return ResolveRefWithBase(root, ref, nil) +} + // ResolveParameter resolves a paramter reference against a context root func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) + return ResolveParameterWithBase(root, ref, nil) +} + +// ResolveParameterWithBase resolves a paramter reference against a context root and base path +func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -89,7 +121,12 @@ func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { // ResolveResponse resolves response a reference against a context root func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - resolver, err := defaultSchemaLoader(root, nil, nil) + return ResolveResponseWithBase(root, ref, nil) +} + +// ResolveResponseWithBase resolves response a reference against a context root and base path +func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) if err != nil { return nil, err } @@ -101,23 +138,72 @@ func ResolveResponse(root interface{}, ref Ref) (*Response, error) { return result, nil } +// ResolveItems resolves header and parameter items reference against a context root and base path +func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { + resolver, err := defaultSchemaLoader(root, nil, opts, nil) + if err != nil { + return nil, err + } + + result := new(Items) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolvePathItem resolves response a path item against a context root and base path +func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { + resolver, err := 
defaultSchemaLoader(root, nil, opts, nil) + if err != nil { + return nil, err + } + + result := new(PathItem) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + type schemaLoader struct { loadingRef *Ref startingRef *Ref currentRef *Ref root interface{} + options *ExpandOptions cache ResolutionCache loadDoc func(string) (json.RawMessage, error) } var idPtr, _ = jsonpointer.New("/id") -var schemaPtr, _ = jsonpointer.New("/$schema") var refPtr, _ = jsonpointer.New("/$ref") -func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +func defaultSchemaLoader( + root interface{}, + ref *Ref, + expandOptions *ExpandOptions, + cache ResolutionCache) (*schemaLoader, error) { + if cache == nil { cache = resCache } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } var ptr *jsonpointer.Pointer if ref != nil { @@ -127,18 +213,16 @@ func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*sc currentRef := nextRef(root, ref, ptr) return &schemaLoader{ - root: root, loadingRef: ref, startingRef: ref, + currentRef: currentRef, + root: root, + options: expandOptions, cache: cache, loadDoc: func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil + debugLog("fetching document at %q", path) + return PathLoader(path) }, - currentRef: currentRef, }, nil } @@ -159,6 +243,7 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe if startingRef == nil { return nil } + if ptr == nil { return startingRef } @@ -184,32 +269,111 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe refRef, _, _ := refPtr.Get(node) if refRef != nil { - rf, _ := NewRef(refRef.(string)) + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } nw, err := ret.Inherits(rf) if err != nil { break } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + ret = nw } } + return ret } +func debugLog(msg string, args ...interface{}) { + if Debug { + log.Printf(msg, args...) 
+ } +} + +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + refURL := ref.GetURL() + debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String()) + if strings.HasPrefix(refURL.String(), "#") { + return ref + } + + if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") { + filePath := refURL.Path + debugLog("normalizing file path: %s", filePath) + + if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 { + debugLog("joining %s with %s", relativeBase, filePath) + if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil { + if !fi.IsDir() { + relativeBase = filepath.Dir(filepath.FromSlash(relativeBase)) + } + } + filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath)) + } + if !filepath.IsAbs(filepath.FromSlash(filePath)) { + pwd, err := os.Getwd() + if err == nil { + debugLog("joining cwd %s with %s", pwd, filePath) + filePath = filepath.Join(pwd, filepath.FromSlash(filePath)) + } + } + + debugLog("cleaning %s", filePath) + filePath = filepath.Clean(filepath.FromSlash(filePath)) + _, err := os.Stat(filepath.FromSlash(filePath)) + if err == nil { + debugLog("rewriting url %s to scheme \"\" path %s", refURL.String(), filePath) + slp := filepath.FromSlash(filePath) + if filepath.IsAbs(slp) && filepath.Separator == '\\' && len(slp) > 1 && slp[1] == ':' && ('a' <= slp[0] && slp[0] <= 'z' || 'A' <= slp[0] && slp[0] <= 'Z') { + slp = slp[2:] + } + refURL.Scheme = "" + refURL.Path = filepath.ToSlash(slp) + debugLog("new url with joined filepath: %s", refURL.String()) + *ref = MustCreateRef(refURL.String()) + } + } + + debugLog("refurl: %s", ref.GetURL().String()) + return ref +} + func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { + tgt := reflect.ValueOf(target) if tgt.Kind() != reflect.Ptr { return fmt.Errorf("resolve ref: target needs to be a pointer") } oldRef := currentRef + if currentRef != nil { + debugLog("resolve ref current %s new %s", currentRef.String(), ref.String()) + nextRef := nextRef(node, ref, currentRef.GetPointer()) + if nextRef == nil || nextRef.GetURL() == nil { + return nil + } var err error - currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) + currentRef, err = currentRef.Inherits(*nextRef) + debugLog("resolved ref current %s", currentRef.String()) if err != nil { return err } } + if currentRef == nil { currentRef = ref } @@ -245,42 +409,71 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{} return nil } - if refURL.Scheme != "" && refURL.Host != "" { - // most definitely take the red pill - data, _, _, err := r.load(refURL) - if err != nil { - return err - } + relativeBase := "" + if r.options != nil && r.options.RelativeBase != "" { + relativeBase = r.options.RelativeBase + } + normalizeFileRef(currentRef, relativeBase) + debugLog("current ref normalized file: %s", currentRef.String()) + normalizeFileRef(ref, relativeBase) + debugLog("ref normalized file: %s", currentRef.String()) - if ((oldRef == nil && currentRef != nil) || - (oldRef != nil && currentRef == nil) || - oldRef.String() != currentRef.String()) && - ((oldRef == nil && ref != nil) || - (oldRef != nil && ref == nil) || - (oldRef.String() != ref.String())) { + data, _, _, err := r.load(currentRef.GetURL()) + if err != nil { + return err + } - return r.resolveRef(currentRef, ref, data, target) - } + if ((oldRef == nil && currentRef != nil) || + (oldRef != nil && currentRef == nil) || + 
oldRef.String() != currentRef.String()) && + ((oldRef == nil && ref != nil) || + (oldRef != nil && ref == nil) || + (oldRef.String() != ref.String())) { - var res interface{} - if currentRef.String() != "" { - res, _, err = currentRef.GetPointer().Get(data) + return r.resolveRef(currentRef, ref, data, target) + } + + var res interface{} + if currentRef.String() != "" { + res, _, err = currentRef.GetPointer().Get(data) + if err != nil { + if strings.HasPrefix(ref.String(), "#") { + if r.loadingRef != nil { + rr, er := r.loadingRef.Inherits(*ref) + if er != nil { + return er + } + refURL = rr.GetURL() + + data, _, _, err = r.load(refURL) + if err != nil { + return err + } + } else { + data = r.root + } + } + + res, _, err = ref.GetPointer().Get(data) if err != nil { return err } - } else { - res = data - } - - if err := swag.DynamicJSONToStruct(res, target); err != nil { - return err } + } else { + res = data + } + if err := swag.DynamicJSONToStruct(res, target); err != nil { + return err } + + r.currentRef = currentRef + return nil } func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) toFetch := *refURL toFetch.Fragment = "" @@ -299,44 +492,51 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) return data, toFetch, fromCache, nil } -func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { - if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { - return err - } - return nil +func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { + return r.resolveRef(r.currentRef, ref, r.root, target) } -type specExpander struct { - spec *Swagger - resolver *schemaLoader +func (r *schemaLoader) reset() { + ref := r.startingRef + + var ptr *jsonpointer.Pointer + if ref != nil { + ptr = ref.GetPointer() + } + + r.currentRef = nextRef(r.root, ref, ptr) } // ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger) error { - resolver, err := defaultSchemaLoader(spec, nil, nil) - if err != nil { +func ExpandSpec(spec *Swagger, options *ExpandOptions) error { + resolver, err := defaultSchemaLoader(spec, nil, options, nil) + // Just in case this ever returns an error. 
+ if shouldStopOnError(err, resolver.options) { return err } - for key, defintition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { - return err + if options == nil || !options.SkipSchemas { + for key, definition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) { + return err + } + resolver.reset() + spec.Definitions[key] = *def } - spec.Definitions[key] = *def } for key, parameter := range spec.Parameters { - if err := expandParameter(¶meter, resolver); err != nil { + if err := expandParameter(¶meter, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Parameters[key] = parameter } for key, response := range spec.Responses { - if err := expandResponse(&response, resolver); err != nil { + if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Responses[key] = response @@ -344,7 +544,7 @@ func ExpandSpec(spec *Swagger) error { if spec.Paths != nil { for key, path := range spec.Paths.Paths { - if err := expandPathItem(&path, resolver); err != nil { + if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) { return err } spec.Paths.Paths[key] = path @@ -354,9 +554,25 @@ func ExpandSpec(spec *Swagger) error { return nil } +func shouldStopOnError(err error, opts *ExpandOptions) bool { + if err != nil && !opts.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + // ExpandSchema expands the refs in the schema object func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + return ExpandSchemaWithBasePath(schema, root, cache, nil) +} +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options +func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error { if schema == nil { return nil } @@ -367,18 +583,17 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error nrr, _ := NewRef(schema.ID) var rrr *Ref if nrr.String() != "" { - switch root.(type) { + switch rt := root.(type) { case *Schema: - rid, _ := NewRef(root.(*Schema).ID) + rid, _ := NewRef(rt.ID) rrr, _ = rid.Inherits(nrr) case *Swagger: - rid, _ := NewRef(root.(*Swagger).ID) + rid, _ := NewRef(rt.ID) rrr, _ = rid.Inherits(nrr) } - } - resolver, err := defaultSchemaLoader(root, rrr, cache) + resolver, err := defaultSchemaLoader(root, rrr, opts, cache) if err != nil { return err } @@ -389,7 +604,7 @@ func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error } var s *Schema if s, err = expandSchema(*schema, refs, resolver); err != nil { - return nil + return err } *schema = *s return nil @@ -400,7 +615,15 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S if target.Items.Schema != nil { t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) if err != nil { - return nil, err + if target.Items.Schema.ID == "" { + target.Items.Schema.ID = target.ID + if err != nil { + t, err = expandSchema(*target.Items.Schema, parentRefs, resolver) + if err != nil { + return nil, err + } + } + } } *target.Items.Schema = *t } @@ -415,137 +638,173 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S return &target, 
nil } -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { - defer func() { - schema = &target - }() +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { if target.Ref.String() == "" && target.Ref.IsRoot() { - target = *resolver.root.(*Schema) - return + debugLog("skipping expand schema for no ref and root: %v", resolver.root) + + return resolver.root.(*Schema), nil } // t is the new expanded schema var t *Schema + for target.Ref.String() != "" { - // var newTarget Schema - pRefs := strings.Join(parentRefs, ",") - pRefs += "," - if strings.Contains(pRefs, target.Ref.String()+",") { - err = nil - return + if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { + return &target, nil } - if err = resolver.Resolve(&target.Ref, &t); err != nil { - return + if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) { + return &target, err + } + + if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { + debugLog("ref already exists in parent") + return &target, nil } parentRefs = append(parentRefs, target.Ref.String()) - target = *t + if t != nil { + target = *t + } } - if t, err = expandItems(target, parentRefs, resolver); err != nil { - return + t, err := expandItems(target, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target = *t } - target = *t for i := range target.AllOf { - if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.AllOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.AllOf[i] = *t } - target.AllOf[i] = *t } for i := range target.AnyOf { - if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err } target.AnyOf[i] = *t } for i := range target.OneOf { - if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { - return + t, err := expandSchema(target.OneOf[i], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t } - target.OneOf[i] = *t } if target.Not != nil { - if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.Not, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.Not = *t } - *target.Not = *t } - for k, _ := range target.Properties { - if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { - return + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.Properties[k] = *t } - target.Properties[k] = *t } if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t } - *target.AdditionalProperties.Schema = *t } - for k, _ 
:= range target.PatternProperties { - if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { - return + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t } - target.PatternProperties[k] = *t } - for k, _ := range target.Dependencies { + for k := range target.Dependencies { if target.Dependencies[k].Schema != nil { - if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t } - *target.Dependencies[k].Schema = *t } } if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { - return + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t } - *target.AdditionalItems.Schema = *t } - for k, _ := range target.Definitions { - if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { - return + for k := range target.Definitions { + t, err := expandSchema(target.Definitions[k], parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { + return &target, err + } + if t != nil { + target.Definitions[k] = *t } - target.Definitions[k] = *t } - return + return &target, nil } func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { if pathItem == nil { return nil } + if pathItem.Ref.String() != "" { if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { return err } + resolver.reset() + pathItem.Ref = Ref{} } for idx := range pathItem.Parameters { - if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { + if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) { return err } } - if err := expandOperation(pathItem.Get, resolver); err != nil { + if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Head, resolver); err != nil { + if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Options, resolver); err != nil { + if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Put, resolver); err != nil { + if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Post, resolver); err != nil { + if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Patch, resolver); err != nil { + if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) { return err } - if err := expandOperation(pathItem.Delete, resolver); err != nil { + if err := expandOperation(pathItem.Delete, resolver); shouldStopOnError(err, resolver.options) { return err } return nil @@ -555,8 +814,9 @@ func 
expandOperation(op *Operation, resolver *schemaLoader) error { if op == nil { return nil } + for i, param := range op.Parameters { - if err := expandParameter(¶m, resolver); err != nil { + if err := expandParameter(¶m, resolver); shouldStopOnError(err, resolver.options) { return err } op.Parameters[i] = param @@ -564,11 +824,11 @@ func expandOperation(op *Operation, resolver *schemaLoader) error { if op.Responses != nil { responses := op.Responses - if err := expandResponse(responses.Default, resolver); err != nil { + if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) { return err } for code, response := range responses.StatusCodeResponses { - if err := expandResponse(&response, resolver); err != nil { + if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) { return err } responses.StatusCodeResponses[code] = response @@ -582,22 +842,29 @@ func expandResponse(response *Response, resolver *schemaLoader) error { return nil } + var parentRefs []string + if response.Ref.String() != "" { - if err := resolver.Resolve(&response.Ref, response); err != nil { + parentRefs = append(parentRefs, response.Ref.String()) + if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) { return err } + resolver.reset() + response.Ref = Ref{} } - if response.Schema != nil { - parentRefs := []string{response.Schema.Ref.String()} - if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { + if !resolver.options.SkipSchemas && response.Schema != nil { + parentRefs = append(parentRefs, response.Schema.Ref.String()) + debugLog("response ref: %s", response.Schema.Ref) + if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) { return err } - if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { + s, err := expandSchema(*response.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { return err - } else { - *response.Schema = *s } + resolver.reset() + *response.Schema = *s } return nil } @@ -606,21 +873,28 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error { if parameter == nil { return nil } + + var parentRefs []string + if parameter.Ref.String() != "" { - if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { + parentRefs = append(parentRefs, parameter.Ref.String()) + if err := resolver.Resolve(¶meter.Ref, parameter); shouldStopOnError(err, resolver.options) { return err } + resolver.reset() + parameter.Ref = Ref{} } - if parameter.Schema != nil { - parentRefs := []string{parameter.Schema.Ref.String()} - if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { + if !resolver.options.SkipSchemas && parameter.Schema != nil { + parentRefs = append(parentRefs, parameter.Schema.Ref.String()) + if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); shouldStopOnError(err, resolver.options) { return err } - if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { + s, err := expandSchema(*parameter.Schema, parentRefs, resolver) + if shouldStopOnError(err, resolver.options) { return err - } else { - *parameter.Schema = *s } + resolver.reset() + *parameter.Schema = *s } return nil } diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go index 758b84531..85c4d454c 100644 --- a/vendor/github.com/go-openapi/spec/header.go +++ 
b/vendor/github.com/go-openapi/spec/header.go @@ -16,7 +16,9 @@ package spec import ( "encoding/json" + "strings" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -30,6 +32,7 @@ type HeaderProps struct { type Header struct { CommonValidations SimpleSchema + VendorExtensible HeaderProps } @@ -158,8 +161,35 @@ func (h *Header) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { return err } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } if err := json.Unmarshal(data, &h.HeaderProps); err != nil { return err } return nil } + +// JSONLookup look up a value by the json property name +func (p Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go index 4d57ea5ca..46944fb69 100644 --- a/vendor/github.com/go-openapi/spec/items.go +++ b/vendor/github.com/go-openapi/spec/items.go @@ -16,7 +16,9 @@ package spec import ( "encoding/json" + "strings" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -60,11 +62,12 @@ type CommonValidations struct { // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". 
// -// For more information: http://goo.gl/8us55a#items-object- +// For more information: http://goo.gl/8us55a#items-object type Items struct { Refable CommonValidations SimpleSchema + VendorExtensible } // NewItems creates a new instance of items @@ -197,3 +200,20 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2), nil } + +// JSONLookup look up a value by the json property name +func (p Items) JSONLookup(token string) (interface{}, error) { + if token == "$ref" { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 8fb66d12a..71aee1e80 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -16,6 +16,7 @@ package spec import ( "encoding/json" + "strings" "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" @@ -100,15 +101,16 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) { if token == "$ref" { return &p.Ref, nil } + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil { + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { return r, nil } r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil { + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { return nil, err } if r != nil { diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go index 68631df8b..4833b87e2 100644 --- a/vendor/github.com/go-openapi/spec/ref.go +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -55,7 +55,7 @@ func (r *Ref) RemoteURI() string { } // IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI() bool { +func (r *Ref) IsValidURI(basepaths ...string) bool { if r.String() == "" { return true } @@ -81,14 +81,18 @@ func (r *Ref) IsValidURI() bool { // check for local file pth := v if r.HasURLPathOnly { - p, e := filepath.Abs(pth) + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) if e != nil { return false } pth = p } - fi, err := os.Stat(pth) + fi, err := os.Stat(filepath.ToSlash(pth)) if err != nil { return false } @@ -116,25 +120,18 @@ func NewRef(refURI string) (Ref, error) { return Ref{Ref: ref}, nil } -// MustCreateRef creates a ref object but +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error. 
func MustCreateRef(refURI string) Ref { return Ref{Ref: jsonreference.MustCreateRef(refURI)} } -// // NewResolvedRef creates a resolved ref -// func NewResolvedRef(refURI string, data interface{}) Ref { -// return Ref{ -// Ref: jsonreference.MustCreateRef(refURI), -// Resolved: data, -// } -// } - // MarshalJSON marshals this ref into a JSON object func (r Ref) MarshalJSON() ([]byte, error) { str := r.String() if str == "" { if r.IsRoot() { - return []byte(`{"$ref":"#"}`), nil + return []byte(`{"$ref":""}`), nil } return []byte("{}"), nil } diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go index 308cc8478..a32b039ea 100644 --- a/vendor/github.com/go-openapi/spec/response.go +++ b/vendor/github.com/go-openapi/spec/response.go @@ -17,6 +17,7 @@ package spec import ( "encoding/json" + "github.com/go-openapi/jsonpointer" "github.com/go-openapi/swag" ) @@ -34,6 +35,19 @@ type ResponseProps struct { type Response struct { Refable ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (p Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.ResponseProps, token) + return r, err } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -44,6 +58,9 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.Refable); err != nil { return err } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } return nil } @@ -57,7 +74,11 @@ func (r Response) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil } // NewResponse creates a new response instance diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index ea071ca63..3ab06697f 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -51,7 +51,7 @@ func (r Responses) JSONLookup(token string) (interface{}, error) { } if i, err := strconv.Atoi(token); err == nil { if scr, ok := r.StatusCodeResponses[i]; ok { - return &scr, nil + return scr, nil } } return nil, fmt.Errorf("object has no field %q", token) diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go index eb88f005c..1cdcc163f 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -201,8 +201,8 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error { type SchemaProps struct { ID string `json:"id,omitempty"` - Ref Ref `json:"-,omitempty"` - Schema SchemaURL `json:"-,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` Description string `json:"description,omitempty"` Type StringOrArray `json:"type,omitempty"` Format string `json:"format,omitempty"` @@ -269,7 +269,7 @@ func (s Schema) JSONLookup(token string) (interface{}, error) { } r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || err != nil { + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { return r, err } r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) diff --git a/vendor/github.com/go-openapi/spec/spec.go 
b/vendor/github.com/go-openapi/spec/spec.go index cc2ae56b2..0bb045bc0 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -16,6 +16,8 @@ package spec import "encoding/json" +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema //go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... //go:generate perl -pi -e s,Json,JSON,g bindata.go @@ -27,10 +29,15 @@ const ( ) var ( - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() + jsonSchema *Schema + swaggerSchema *Schema ) +func init() { + jsonSchema = MustLoadJSONSchemaDraft04() + swaggerSchema = MustLoadSwagger20Schema() +} + // MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error func MustLoadJSONSchemaDraft04() *Schema { d, e := JSONSchemaDraft04() diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index ff3ef875e..23780c78a 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -77,7 +77,7 @@ type SwaggerProps struct { Host string `json:"host,omitempty"` BasePath string `json:"basePath,omitempty"` // must start with a leading "/" Paths *Paths `json:"paths"` // required - Definitions Definitions `json:"definitions"` + Definitions Definitions `json:"definitions,omitempty"` Parameters map[string]Parameter `json:"parameters,omitempty"` Responses map[string]Response `json:"responses,omitempty"` SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` @@ -156,7 +156,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { if s.Schema != nil { return json.Marshal(s.Schema) } - return nil, nil + return []byte("null"), nil } // UnmarshalJSON converts this schema object or array from a JSON structure diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 76dedc81d..475cf2e32 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -28,8 +28,7 @@ import ( // See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests // for details on how to do your own health checking. // -// On App Engine Standard it ensures the server has started and is prepared to -// receive requests. +// Main is not yet supported on App Engine Standard. // // Main never returns. // @@ -63,7 +62,7 @@ func IsDevAppServer() bool { // NewContext returns a context for an in-flight HTTP request. // This function is cheap. func NewContext(req *http.Request) context.Context { - return internal.ReqContext(req) + return WithContext(context.Background(), req) } // WithContext returns a copy of the parent context diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 16f87c5d3..ec5aa59b3 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
// +build !appengine -// +build go1.7 package internal @@ -33,8 +32,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -62,11 +60,6 @@ var ( Dial: limitDial, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) func apiURL() *url.URL { @@ -90,11 +83,17 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { outHeader: w.Header(), apiURL: apiURL(), } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - stopFlushing := make(chan int) + ctxs.Lock() + ctxs.m[r] = c + ctxs.Unlock() + defer func() { + ctxs.Lock() + delete(ctxs.m, r) + ctxs.Unlock() + }() + // Patch up RemoteAddr so it looks reasonable. if addr := r.Header.Get(userIPHeader); addr != "" { r.RemoteAddr = addr @@ -192,6 +191,18 @@ func renderPanic(x interface{}) string { return string(buf) } +var ctxs = struct { + sync.Mutex + m map[*http.Request]*context + bg *context // background context, lazily initialized + // dec is used by tests to decorate the netcontext.Context returned + // for a given request. This allows tests to add overrides (such as + // WithAppIDOverride) to the context. The map is nil outside tests. + dec map[*http.Request]func(netcontext.Context) netcontext.Context +}{ + m: make(map[*http.Request]*context), +} + // context represents the context of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. type context struct { @@ -212,34 +223,6 @@ type context struct { var contextKey = "holds a *context" -// jointContext joins two contexts in a superficial way. -// It takes values and timeouts from a base context, and only values from another context. -type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context -} - -func (c jointContext) Deadline() (time.Time, bool) { - return c.base.Deadline() -} - -func (c jointContext) Done() <-chan struct{} { - return c.base.Done() -} - -func (c jointContext) Err() error { - return c.base.Err() -} - -func (c jointContext) Value(key interface{}) interface{} { - if val := c.base.Value(key); val != nil { - return val - } - return c.valuesOnly.Value(key) -} - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. func fromContext(ctx netcontext.Context) *context { c, _ := ctx.Value(&contextKey).(*context) return c @@ -264,70 +247,86 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return req.Context() -} - func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - return jointContext{ - base: parent, - valuesOnly: req.Context(), + ctxs.Lock() + c := ctxs.m[req] + d := ctxs.dec[req] + ctxs.Unlock() + + if d != nil { + parent = d(parent) } -} -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket + if c == nil { + // Someone passed in an http.Request that is not in-flight. + // We panic here rather than panicking at a later point + // so that stack traces will be more sensible. + log.Panic("appengine: NewContext passed an unknown http.Request") + } + return withContext(parent, c) } func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, + ctxs.Lock() + defer ctxs.Unlock() + + if ctxs.bg != nil { + return toContext(ctxs.bg) + } + + // Compute background security ticket. + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + + ctxs.bg = &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) + }, + apiURL: apiURL(), + } - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go ctxs.bg.logFlusher(make(chan int)) - return backgroundContext + return toContext(ctxs.bg) } // RegisterTestRequest registers the HTTP request req for testing, such that // any API calls are sent to the provided URL. It returns a closure to delete // the registration. // It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { c := &context{ req: req, apiURL: apiURL, } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} + ctxs.Lock() + defer ctxs.Unlock() + if _, ok := ctxs.m[req]; ok { + log.Panic("req already associated with context") + } + if _, ok := ctxs.dec[req]; ok { + log.Panic("req already associated with context") + } + if ctxs.dec == nil { + ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) + } + ctxs.m[req] = c + ctxs.dec[req] = decorate + + return func() { + ctxs.Lock() + delete(ctxs.m, req) + delete(ctxs.dec, req) + ctxs.Unlock() + } } var errTimeout = &CallError{ @@ -453,7 +452,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) c := fromContext(ctx) if c == nil { // Give a good error message rather than a panic lower down. - return errNotAppEngineContext + return errors.New("not an App Engine context") } // Apply transaction modifications if we're in a transaction. 
@@ -476,16 +475,6 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -561,9 +550,6 @@ var logLevelName = map[int64]string{ } func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. c.addLogLine(&logpb.UserAppLogLine{ diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e3..597f66e6e 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -22,20 +22,14 @@ import ( var contextKey = "holds an appengine.Context" -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. func fromContext(ctx netcontext.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { - c := fromContext(ctx) - if c == nil { - return nil, errNotAppEngineContext - } - return c, nil +func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { + return fromContext(ctx) } func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { @@ -59,10 +53,6 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { c := appengine.NewContext(req) return withContext(parent, c) @@ -108,7 +98,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) c := fromContext(ctx) if c == nil { // Give a good error message rather than a panic lower down. - return errNotAppEngineContext + return errors.New("not an App Engine context") } // Apply transaction modifications if we're in a transaction. diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b..2db33a774 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,15 +5,10 @@ package internal import ( - "errors" - "os" - "github.com/golang/protobuf/proto" netcontext "golang.org/x/net/context" ) -var errNotAppEngineContext = errors.New("not an App Engine context") - type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" @@ -82,42 +77,10 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ f(level, format, args...) return } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) + logf(fromContext(ctx), level, format, args...) 
} // NamespacedContext wraps a Context to support namespaces. func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { return withNamespace(ctx, namespace) } - -// SetTestEnv sets the env variables for testing background ticket in Flex. -func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go deleted file mode 100644 index 028b4f056..000000000 --- a/vendor/google.golang.org/appengine/internal/api_pre17.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine -// +build !go1.7 - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. 
- const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - ticket := DefaultTicket() - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return req, func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) 
- return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. - if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
- if ticket == "" { - ticket = DefaultTicket() - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index b59603f13..e6b9227c5 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -13,45 +13,15 @@ import ( ) func DefaultVersionHostname(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.DefaultVersionHostname(c) + return appengine.DefaultVersionHostname(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } +func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.RequestID(c) -} - -func ModuleName(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.ModuleName(c) -} -func VersionID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.VersionID(c) -} - -func fullyQualifiedAppID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return c.FullyQualifiedAppID() -} +func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index d5fa75be7..ebe68b785 100644 --- 
a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -23,11 +23,7 @@ const ( ) func ctxHeaders(ctx netcontext.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header + return fromContext(ctx).Request().Header } func DefaultVersionHostname(ctx netcontext.Context) string { diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index 822e784a4..57331ad17 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -22,11 +22,7 @@ func Main() { port = s } - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE index a68e67f01..8dada3eda 100644 --- a/vendor/gopkg.in/yaml.v2/LICENSE +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -1,188 +1,201 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. 
The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. 
- - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index 085cddc44..e85eb2e3f 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -120,7 +120,6 @@ func (p *parser) parse() *node {
 	default:
 		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
 	}
-	panic("unreachable")
 }
 
 func (p *parser) node(kind int) *node {
@@ -191,6 +190,7 @@ type decoder struct {
 	aliases map[string]bool
 	mapType reflect.Type
 	terrors []string
+	strict  bool
 }
 
 var (
@@ -200,8 +200,8 @@ var (
 	ifaceType      = defaultMapType.Elem()
 )
 
-func newDecoder() *decoder {
-	d := &decoder{mapType: defaultMapType}
+func newDecoder(strict bool) *decoder {
+	d := &decoder{mapType: defaultMapType, strict: strict}
 	d.aliases = make(map[string]bool)
 	return d
 }
@@ -251,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
 //
 // If n holds a null value, prepare returns before doing anything.
 func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
-	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
 		return out, false, false
 	}
 	again := true
@@ -640,6 +640,8 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
 			value := reflect.New(elemType).Elem()
 			d.unmarshal(n.children[i+1], value)
 			inlineMap.SetMapIndex(name, value)
+		} else if d.strict {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", ni.line+1, name.String(), out.Type()))
 		}
 	}
 	return true
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go
index 2befd553e..dcaf502f0 100644
--- a/vendor/gopkg.in/yaml.v2/emitterc.go
+++ b/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -666,7 +666,6 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
 		return yaml_emitter_set_emitter_error(emitter,
 			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
 	}
-	return false
 }
 
 // Expect ALIAS.
@@ -995,10 +994,10 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		break_space    = false
 		space_break    = false
 
-		preceeded_by_whitespace = false
-		followed_by_whitespace  = false
-		previous_space          = false
-		previous_break          = false
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
 	)
 
 	emitter.scalar_data.value = value
@@ -1017,7 +1016,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		flow_indicators = true
 	}
 
-	preceeded_by_whitespace = true
+	preceded_by_whitespace = true
 	for i, w := 0, 0; i < len(value); i += w {
 		w = width(value[i])
 		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@@ -1048,7 +1047,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 					block_indicators = true
 				}
 			case '#':
-				if preceeded_by_whitespace {
+				if preceded_by_whitespace {
 					flow_indicators = true
 					block_indicators = true
 				}
@@ -1089,7 +1088,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
 		}
 
 		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
-		preceeded_by_whitespace = is_blankz(value, i)
+		preceded_by_whitespace = is_blankz(value, i)
 	}
 
 	emitter.scalar_data.multiline = line_breaks
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go
index 0a7037ad1..81d05dfe5 100644
--- a/vendor/gopkg.in/yaml.v2/parserc.go
+++ b/vendor/gopkg.in/yaml.v2/parserc.go
@@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
 	default:
 		panic("invalid parser state")
 	}
-	return false
 }
 
 // Parse the production:
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go
index d5fb09727..f45079171 100644
--- a/vendor/gopkg.in/yaml.v2/readerc.go
+++ b/vendor/gopkg.in/yaml.v2/readerc.go
@@ -247,7 +247,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 				if parser.encoding == yaml_UTF16LE_ENCODING {
 					low, high = 0, 1
 				} else {
-					high, low = 1, 0
+					low, high = 1, 0
 				}
 
 				// The UTF-16 encoding is not as simple as one might
@@ -357,23 +357,26 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
 			if value <= 0x7F {
 				// 0000 0000-0000 007F . 0xxxxxxx
 				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
 			} else if value <= 0x7FF {
 				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
 				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
 				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
 			} else if value <= 0xFFFF {
 				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
 				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
 				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
 				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
 			} else {
 				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
 				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
 				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
 				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
 				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
 			}
-			buffer_len += width
 
 			parser.unread++
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
index 93a863274..232313cc0 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -3,6 +3,7 @@ package yaml
 import (
 	"encoding/base64"
 	"math"
+	"regexp"
 	"strconv"
 	"strings"
 	"unicode/utf8"
@@ -80,6 +81,8 @@ func resolvableTag(tag string) bool {
 	return false
 }
 
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+
 func resolve(tag string, in string) (rtag string, out interface{}) {
 	if !resolvableTag(tag) {
 		return tag, in
@@ -135,9 +138,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
 			if err == nil {
 				return yaml_INT_TAG, uintv
 			}
-			floatv, err := strconv.ParseFloat(plain, 64)
-			if err == nil {
-				return yaml_FLOAT_TAG, floatv
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
 			}
 			if strings.HasPrefix(plain, "0b") {
 				intv, err := strconv.ParseInt(plain[2:], 2, 64)
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index d97d76fa5..074484455 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -9,7 +9,7 @@ import (
 // ************
 //
 // The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
 // some cases we are less restrictive that it requires.
 //
 // The process of transforming a YAML stream into a sequence of events is
@@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
 	if directive {
 		context = "while parsing a %TAG directive"
 	}
-	return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
 }
 
 func trace(args ...interface{}) func() {
@@ -1546,7 +1546,7 @@ func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool
 		// Unknown directive.
 	} else {
 		yaml_parser_set_scanner_error(parser, "while scanning a directive",
-			start_mark, "found uknown directive name")
+			start_mark, "found unknown directive name")
 		return false
 	}
 
@@ -1944,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 	} else {
 		// It's either the '!' tag or not really a tag handle. If it's a %TAG
 		// directive, it's an error. If it's a tag token, it must be a part of URI.
-		if directive && !(s[0] == '!' && s[1] == 0) {
+		if directive && string(s) != "!" {
 			yaml_parser_set_scanner_tag_error(parser, directive,
 				start_mark, "did not find expected '!'")
 			return false
@@ -1959,6 +1959,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
 func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
 	//size_t length = head ? strlen((char *)head) : 0
 	var s []byte
+	hasTag := len(head) > 0
 
 	// Copy the head if needed.
 	//
@@ -2000,10 +2001,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
 		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
 			return false
 		}
+		hasTag = true
 	}
 
-	// Check if the tag is non-empty.
-	if len(s) == 0 {
+	if !hasTag {
 		yaml_parser_set_scanner_tag_error(parser, directive,
 			start_mark, "did not find expected tag URI")
 		return false
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index d133edf9d..5e3c2daee 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -77,8 +77,19 @@ type Marshaler interface {
 // supported tag options.
 //
 func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
 	defer handleErr(&err)
-	d := newDecoder()
+	d := newDecoder(strict)
 	p := newParser(in)
 	defer p.destroy()
 	node := p.parse()
@@ -129,7 +140,7 @@ func Unmarshal(in []byte, out interface{}) (err error) {
 // For example:
 //
 //     type T struct {
-//         F int "a,omitempty"
+//         F int `yaml:"a,omitempty"`
 //         B int
 //     }
 //     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
@@ -222,7 +233,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
 	inlineMap := -1
 	for i := 0; i != n; i++ {
 		field := st.Field(i)
-		if field.PkgPath != "" {
+		if field.PkgPath != "" && !field.Anonymous {
 			continue // Private field
 		}
 
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index d60a6b6b0..3caeca049 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -508,7 +508,7 @@ type yaml_parser_t struct {
 
 	problem string // Error description.
 
-	// The byte about which the problem occured.
+	// The byte about which the problem occurred.
 	problem_offset int
 	problem_value  int
 	problem_mark   yaml_mark_t
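
For context on the vendored yaml.v2 update above (not part of the patch itself): a minimal sketch of how the new UnmarshalStrict entry point differs from Unmarshal, using a hypothetical Config struct and sample data. Plain Unmarshal drops keys with no matching struct field, while UnmarshalStrict reports them through the decoder's new strict flag.

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    // Config is a hypothetical struct with no field for the "unknown" key.
    type Config struct {
    	Name string `yaml:"name"`
    }

    func main() {
    	data := []byte("name: kubedb\nunknown: value\n")

    	var loose Config
    	// Unmarshal silently ignores keys that have no matching struct field.
    	fmt.Println(yaml.Unmarshal(data, &loose)) // <nil>

    	var strict Config
    	// UnmarshalStrict returns a non-nil error naming the "unknown" field instead.
    	fmt.Println(yaml.UnmarshalStrict(data, &strict))
    }
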