diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go index 0a66ba12a87..d6e21950397 100644 --- a/cmd/webhook/main.go +++ b/cmd/webhook/main.go @@ -51,8 +51,9 @@ import ( var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ // v1alpha1 - v1alpha1.SchemeGroupVersion.WithKind("PipelineResource"): &resourcev1alpha1.PipelineResource{}, - v1alpha1.SchemeGroupVersion.WithKind("Run"): &v1alpha1.Run{}, + v1alpha1.SchemeGroupVersion.WithKind("PipelineResource"): &resourcev1alpha1.PipelineResource{}, + v1alpha1.SchemeGroupVersion.WithKind("Run"): &v1alpha1.Run{}, + v1alpha1.SchemeGroupVersion.WithKind("VerificationPolicy"): &v1alpha1.VerificationPolicy{}, // v1beta1 v1beta1.SchemeGroupVersion.WithKind("Pipeline"): &v1beta1.Pipeline{}, v1beta1.SchemeGroupVersion.WithKind("Task"): &v1beta1.Task{}, diff --git a/config/200-clusterrole.yaml b/config/200-clusterrole.yaml index 5ef264d3051..e04677f306d 100644 --- a/config/200-clusterrole.yaml +++ b/config/200-clusterrole.yaml @@ -30,11 +30,14 @@ rules: - apiGroups: ["tekton.dev"] resources: ["tasks", "clustertasks", "taskruns", "pipelines", "pipelineruns", "pipelineresources", "runs", "customruns"] verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["verificationpolicies"] + verbs: ["get", "list", "watch"] - apiGroups: ["tekton.dev"] resources: ["taskruns/finalizers", "pipelineruns/finalizers", "runs/finalizers", "customruns/finalizers"] verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] - apiGroups: ["tekton.dev"] - resources: ["tasks/status", "clustertasks/status", "taskruns/status", "pipelines/status", "pipelineruns/status", "pipelineresources/status", "runs/status", "customruns/status"] + resources: ["tasks/status", "clustertasks/status", "taskruns/status", "pipelines/status", "pipelineruns/status", "pipelineresources/status", "runs/status", "customruns/status", "verificationpolicies/status"] verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] # resolution.tekton.dev - apiGroups: ["resolution.tekton.dev"] @@ -92,6 +95,7 @@ rules: - pipelineresources.tekton.dev - resolutionrequests.resolution.tekton.dev - customruns.tekton.dev + - verificationpolicies.tekton.dev # knative.dev/pkg needs list/watch permissions to set up informers for the webhook. - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] diff --git a/config/300-verificationpolicy.yaml b/config/300-verificationpolicy.yaml new file mode 100644 index 00000000000..b4885c48827 --- /dev/null +++ b/config/300-verificationpolicy.yaml @@ -0,0 +1,48 @@ +# Copyright 2022 The Tekton Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: verificationpolicies.tekton.dev + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + pipeline.tekton.dev/release: "devel" + version: "devel" +spec: + group: tekton.dev + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + # One can use x-kubernetes-preserve-unknown-fields: true + # at the root of the schema (and inside any properties, additionalProperties) + # to get the traditional CRD behaviour that nothing is pruned, despite + # setting spec.preserveUnknownProperties: false. + # + # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/ + # See issue: https://github.com/knative/serving/issues/912 + x-kubernetes-preserve-unknown-fields: true + names: + kind: VerificationPolicy + plural: verificationpolicies + singular: verificationpolicy + categories: + - tekton + - tekton-pipelines + scope: Namespaced diff --git a/docs/install.md b/docs/install.md index f34cf7ba9ee..7c6b3cd5c63 100644 --- a/docs/install.md +++ b/docs/install.md @@ -30,6 +30,7 @@ This guide explains how to install Tekton Pipelines. It covers the following top - [Verify Tekton Pipelines release](#verify-tekton-pipelines-release) - [Verify signatures using `cosign`](#verify-signatures-using-cosign) - [Verify the tansparency logs using `rekor-cli`](#verify-the-transparency-logs-using-rekor-cli) +- [Verify Tekton Resources](#verify-tekton-resources) - [Next steps](#next-steps) ## Before you begin @@ -271,11 +272,11 @@ data: ## Configuring built-in remote Task and Pipeline resolution Three remote resolvers are currently provided as part of the Tekton Pipelines installation. -By default, these remote resolvers are disabled. Each resolver is enabled by setting -the appropriate feature flag in the `resolvers-feature-flags` ConfigMap in the `tekton-pipelines-resolvers` +By default, these remote resolvers are disabled. Each resolver is enabled by setting +the appropriate feature flag in the `resolvers-feature-flags` ConfigMap in the `tekton-pipelines-resolvers` namespace: -1. [The `bundles` resolver](./bundle-resolver.md), enabled by setting the `enable-bundles-resolver` +1. [The `bundles` resolver](./bundle-resolver.md), enabled by setting the `enable-bundles-resolver` feature flag to `true`. 1. [The `git` resolver](./git-resolver.md), enabled by setting the `enable-git-resolver` feature flag to `true`. @@ -423,9 +424,9 @@ features](#alpha-features) to be used. - `resource-verification-mode`: Setting this flag to "enforce" will enforce verification of tasks/pipeline. Failing to verify will fail the taskrun/pipelinerun. "warn" will only log the err message and "skip" will skip the whole verification. - `results-from`: set this flag to "termination-message" to use the container's termination message to fetch results from. This is the default method of extracting results. Set it to "sidecar-logs" to enable use of a results sidecar logs to extract results instead of termination message. -- `enable-provenance-in-status`: set this flag to "true" to enable recording - the `provenance` field in `TaskRun` and `PipelineRun` status. The `provenance` - field contains metadata about resources used in the TaskRun/PipelineRun such as the +- `enable-provenance-in-status`: set this flag to "true" to enable recording + the `provenance` field in `TaskRun` and `PipelineRun` status. 
The `provenance` + field contains metadata about resources used in the TaskRun/PipelineRun such as the source from where a remote Task/Pipeline definition was fetched. - `custom-task-version`: set this flag to "v1alpha1" to have `PipelineRuns` create `Runs` @@ -712,6 +713,10 @@ gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/pullrequest-init gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/webhook ``` +## Verify Tekton Resources + +Trusted Resources is a feature to verify Tekton Tasks and Pipelines. The current version of feature supports `v1beta1` `Task` and `Pipeline`. For more details please take a look at [Trusted Resources](./trusted-resources.md). + ## Next steps To get started with Tekton Pipelines, see the [Tekton Pipelines Tutorial](./tutorial.md) and take a look at our [examples](https://github.com/tektoncd/pipeline/tree/main/examples). diff --git a/docs/pipeline-api.md b/docs/pipeline-api.md index caf80d134a1..11f89a4b63f 100644 --- a/docs/pipeline-api.md +++ b/docs/pipeline-api.md @@ -5738,6 +5738,8 @@ Resource Types:

Run @@ -5955,6 +5957,101 @@ RunStatus +

VerificationPolicy +

+
+

VerificationPolicy defines the Tekton resources and corresponding authorities to verify. +The VerificationPolicy is used in trusted resources to store the public keys to verify +Tekton resources.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+ +tekton.dev/v1alpha1 + +
+kind
+string +
VerificationPolicy
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+(Optional) +Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +VerificationPolicySpec + + +
+

Spec holds the desired state of the VerificationPolicy.

+
+
+ + + + + + + + + +
+resources
+ + +[]ResourcePattern + + +
+

Resources defines the patterns of resource names that should be subject to this policy. +For example, we may want to apply this Policy only to resources from a certain GitHub repo, +in which case the ResourcesPattern should include that path. If using gitresolver and we want to configure keys for a certain git repo, +the ResourcesPattern can be https://github.com/tektoncd/catalog.git; a regex built from the pattern is used to filter those resources.

+
+authorities
+ + +[]Authority + + +
+

Authorities defines the rules for validating signatures.

+
+

PipelineResource

@@ -6085,6 +6182,48 @@ the controller, but was unused as there is no controller for PipelineResource. +

Authority +

+

+(Appears on:VerificationPolicySpec) +

+
+

The Authority block defines the rules for validating signatures.

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Name is the name for this authority.

+
+key
+ + +KeyRef + + +
+

Key defines the type of key to validate the resource.

+

EmbeddedRunSpec

@@ -6156,6 +6295,95 @@ structs.

+

KeyRef +

+

+(Appears on:Authority) +

+
+

KeyRef defines the reference to a public key

+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+secretRef
+ + +Kubernetes core/v1.SecretReference + + +
+(Optional) +

SecretRef sets a reference to a secret with the key.

+
+data
+ +string + +
+(Optional) +

Data contains the inline public key.

+
+hashAlgorithm
+ +string + +
+(Optional) +

HashAlgorithm always defaults to sha256 if the algorithm hasn’t been explicitly set

+
+

ResourcePattern +

+

+(Appears on:VerificationPolicySpec) +

+
+

ResourcePattern defines the pattern of the resource source

+
+ + + + + + + + + + + + + +
FieldDescription
+pattern
+ +string + +
+

Pattern defines a resource pattern. A regex is created to filter resources based on Pattern. +Example patterns: +GitHub resource: https://github.com/tektoncd/catalog.git, https://github.com/tektoncd/* +Bundle resource: gcr.io/tekton-releases/catalog/upstream/git-clone, gcr.io/tekton-releases/catalog/upstream/* +Hub resource: https://artifacthub.io/*

+

RunReason (string alias)

@@ -6333,6 +6561,53 @@ Refer Go’s ParseDuration documentation for expected format: VerificationPolicySpec + +

+(Appears on:VerificationPolicy) +

+
+

VerificationPolicySpec defines the patterns and authorities.

+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+resources
+ + +[]ResourcePattern + + +
+

Resources defines the patterns of resource names that should be subject to this policy. +For example, we may want to apply this Policy only to resources from a certain GitHub repo, +in which case the ResourcesPattern should include that path. If using gitresolver and we want to configure keys for a certain git repo, +the ResourcesPattern can be https://github.com/tektoncd/catalog.git; a regex built from the pattern is used to filter those resources.

+
+authorities
+ + +[]Authority + + +
+

Authorities defines the rules for validating signatures.

+

PipelineResourceSpec

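As a quick orientation for the API reference above, here is a minimal Go sketch (illustrative only, not part of this change) that constructs a `VerificationPolicy` equivalent to the YAML examples in `docs/trusted-resources.md` below, using the new `v1alpha1` types; the name, namespace, and key data are placeholders.

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A VerificationPolicy for resources resolved from the Tekton catalog repo,
	// with a single authority that carries an inline (placeholder) public key.
	vp := &v1alpha1.VerificationPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "verification-policy-a", // placeholder name
			Namespace: "resource-namespace",    // should be the namespace of the resources being verified
		},
		Spec: v1alpha1.VerificationPolicySpec{
			Resources: []v1alpha1.ResourcePattern{
				{Pattern: "https://github.com/tektoncd/catalog.git"},
			},
			Authorities: []v1alpha1.Authority{{
				Name: "key1",
				Key: &v1alpha1.KeyRef{
					Data:          "STRING_ENCODED_PUBLIC_KEY", // placeholder inline key
					HashAlgorithm: "sha256",
				},
			}},
		},
	}
	fmt.Printf("policy %s defines %d resource pattern(s)\n", vp.Name, len(vp.Spec.Resources))
}
```

Validation of such an object (non-empty `resources` and `authorities`, compilable patterns, supported hash algorithm) is handled by the webhook through the `Validate` methods added in `verificationpolicy_validation.go`.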
diff --git a/docs/trusted-resources.md b/docs/trusted-resources.md
index 4895a5200ab..6f69c3244d1 100644
--- a/docs/trusted-resources.md
+++ b/docs/trusted-resources.md
@@ -17,7 +17,7 @@ Verification failure will mark corresponding taskrun/pipelinerun as Failed statu
 ## Instructions

 ### Sign Resources
-For `Sign` cli you may refer to [experimental repo](https://github.com/tektoncd/experimental/tree/main/pipeline/trusted-resources) to sign the resources. We're working to add `sign` and `verify` into [Tekton Cli](https://github.com/tektoncd/cli) as a subcommand.
+We have added `sign` and `verify` subcommands to the [Tekton Cli](https://github.com/tektoncd/cli) in release [v0.28.0 and later](https://github.com/tektoncd/cli/releases/tag/v0.28.0). Please refer to the [cli docs](https://github.com/tektoncd/cli/blob/main/docs/cmd/tkn_task_sign.md) to sign and verify Tekton resources.

 A signed task example:
 ```yaml
@@ -65,8 +65,11 @@ Or patch the new values:
 kubectl patch configmap feature-flags -n tekton-pipelines -p='{"data":{"resource-verification-mode":"enforce"}}
 ```

-#### Config key at configmap
-Note that multiple keys reference should be separated by comma. If the resource can pass any key in the list, it will pass the verification.
+
+#### Config key at configmap (will be deprecated)
+Multiple key references should be separated by commas. If the resource can pass verification with any key in the list, it passes the verification.
+
+**Note:** key configuration in the configmap will be deprecated; issue [#5852](https://github.com/tektoncd/pipeline/issues/5852) tracks the deprecation.

 We currently hardcode SHA256 as hashfunc for loading public keys as verifiers.

@@ -91,3 +94,70 @@ metadata:
 data:
   publickeys: "/etc/verification-secrets/cosign.pub, /etc/verification-secrets/cosign2.pub"
 ```
+
+#### Config key at VerificationPolicy
+VerificationPolicy supports SecretRef or encoded public key data.
+
+How does VerificationPolicy work?
+You can create multiple `VerificationPolicy` objects and apply them to the cluster.
+1. Trusted resources will look up policies from the resource namespace (usually this is the same as the taskrun/pipelinerun namespace).
+2. If multiple policies are found, then for each policy we check whether the resource URL matches any of the `patterns` in the `resources` list. If it matches, that policy is used for verification.
+3. If multiple policies are matched, the resource needs to pass all of them to pass verification.
+4. To pass one policy, the resource only needs to pass verification with any one of the public keys in that policy.
+
+Take the following `VerificationPolicies` for example: a resource from "https://github.com/tektoncd/catalog.git" needs to pass both `verification-policy-a` and `verification-policy-b`; to pass `verification-policy-a` the resource needs to pass either `key1` or `key2`.
+
+Example:
+```yaml
+apiVersion: tekton.dev/v1alpha1
+kind: VerificationPolicy
+metadata:
+  name: verification-policy-a
+  namespace: resource-namespace
+spec:
+  # resources defines a list of patterns
+  resources:
+    - pattern: "https://github.com/tektoncd/catalog.git" #git resource pattern
+    - pattern: "gcr.io/tekton-releases/catalog/upstream/git-clone" # bundle resource pattern
+    - pattern: "https://artifacthub.io/" # hub resource pattern
+  # authorities defines a list of public keys
+  authorities:
+    - name: key1
+      key:
+        # secretRef refers to a secret in the cluster, this secret should contain public keys data
+        secretRef:
+          name: secret-name-a
+          namespace: secret-namespace
+        hashAlgorithm: sha256
+    - name: key2
+      key:
+        # data stores the inline public key data
+        data: "STRING_ENCODED_PUBLIC_KEY"
+```
+
+```yaml
+apiVersion: tekton.dev/v1alpha1
+kind: VerificationPolicy
+metadata:
+  name: verification-policy-b
+  namespace: resource-namespace
+spec:
+  resources:
+    - pattern: "https://github.com/tektoncd/catalog.git"
+  authorities:
+    - name: key3
+      key:
+        # data stores the inline public key data
+        data: "STRING_ENCODED_PUBLIC_KEY"
+```
+
+`namespace` should be the same as the corresponding resource's namespace.
+
+`pattern` is used to filter remote resources by their source URL, e.g. a git resource pattern can be set to https://github.com/tektoncd/catalog.git. The `pattern` must follow regex syntax; we use the Go regexp library's [`Match`](https://pkg.go.dev/regexp#Match) to match the pattern from the VerificationPolicy against the `ConfigSource` URL resolved by remote resolution. Note that `.*` will match all resources.
+To learn more about regex syntax please refer to [syntax](https://pkg.go.dev/regexp/syntax).
+To learn more about `ConfigSource` please refer to the resolver docs for more context, e.g. [gitresolver](./git-resolver.md).
+
+`key` is used to store the public key; note that `secretRef` and `data` cannot be configured at the same time.
+
+`hashAlgorithm` is the hash algorithm for the public key; the default is `sha256`. `SHA224`, `SHA384`, and `SHA512` are also supported.
+
diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go
index 71dee99f42a..c83f70f823a 100644
--- a/pkg/apis/config/feature_flags.go
+++ b/pkg/apis/config/feature_flags.go
@@ -336,6 +336,20 @@ func EnableBetaAPIFields(ctx context.Context) context.Context {
 	return setEnableAPIFields(ctx, "beta")
 }

+// CheckEnforceResourceVerificationMode returns true if the ResourceVerificationMode is EnforceResourceVerificationMode
+// else returns false
+func CheckEnforceResourceVerificationMode(ctx context.Context) bool {
+	cfg := FromContextOrDefaults(ctx)
+	return cfg.FeatureFlags.ResourceVerificationMode == EnforceResourceVerificationMode
+}
+
+// CheckWarnResourceVerificationMode returns true if the ResourceVerificationMode is WarnResourceVerificationMode
+// else returns false
+func CheckWarnResourceVerificationMode(ctx context.Context) bool {
+	cfg := FromContextOrDefaults(ctx)
+	return cfg.FeatureFlags.ResourceVerificationMode == WarnResourceVerificationMode
+}
+
 func setEnableAPIFields(ctx context.Context, want string) context.Context {
 	featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{
 		"enable-api-fields": want,
diff --git a/pkg/apis/config/feature_flags_test.go b/pkg/apis/config/feature_flags_test.go
index 4d38cb266a6..34d102862c0 100644
--- a/pkg/apis/config/feature_flags_test.go
+++ b/pkg/apis/config/feature_flags_test.go
@@ -17,12 +17,16 @@ limitations under the License.
package config_test import ( + "context" "testing" "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/config" test "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/logging" ) func TestNewFeatureFlagsFromConfigMap(t *testing.T) { @@ -255,6 +259,48 @@ func TestNewFeatureFlagsConfigMapErrors(t *testing.T) { } } +func TestCheckEnforceResourceVerificationMode(t *testing.T) { + ctx := context.Background() + if config.CheckEnforceResourceVerificationMode(ctx) { + t.Errorf("CheckCheckEnforceResourceVerificationMode got true but expected to be false") + } + store := config.NewStore(logging.FromContext(ctx).Named("config-store")) + featureflags := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "feature-flags", + }, + Data: map[string]string{ + "resource-verification-mode": config.EnforceResourceVerificationMode, + }, + } + store.OnConfigChanged(featureflags) + ctx = store.ToContext(ctx) + if !config.CheckEnforceResourceVerificationMode(ctx) { + t.Errorf("CheckCheckEnforceResourceVerificationMode got false but expected to be true") + } +} + +func TestCheckWarnResourceVerificationMode(t *testing.T) { + ctx := context.Background() + if config.CheckWarnResourceVerificationMode(ctx) { + t.Errorf("CheckWarnResourceVerificationMode got true but expected to be false") + } + store := config.NewStore(logging.FromContext(ctx).Named("config-store")) + featureflags := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "feature-flags", + }, + Data: map[string]string{ + "resource-verification-mode": config.WarnResourceVerificationMode, + }, + } + store.OnConfigChanged(featureflags) + ctx = store.ToContext(ctx) + if !config.CheckWarnResourceVerificationMode(ctx) { + t.Errorf("CheckWarnResourceVerificationMode got false but expected to be true") + } +} + func verifyConfigFileWithExpectedFeatureFlagsConfig(t *testing.T, fileName string, expectedConfig *config.FeatureFlags) { cm := test.ConfigMapFromTestFile(t, fileName) if flags, err := config.NewFeatureFlagsFromConfigMap(cm); err == nil { diff --git a/pkg/apis/pipeline/v1alpha1/register.go b/pkg/apis/pipeline/v1alpha1/register.go index 42b5e4b18ec..fe5e76f459c 100644 --- a/pkg/apis/pipeline/v1alpha1/register.go +++ b/pkg/apis/pipeline/v1alpha1/register.go @@ -48,6 +48,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Run{}, &RunList{}, + &VerificationPolicy{}, + &VerificationPolicyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/pkg/apis/pipeline/v1alpha1/verificationpolicy_defaults.go b/pkg/apis/pipeline/v1alpha1/verificationpolicy_defaults.go new file mode 100644 index 00000000000..6b18c318660 --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/verificationpolicy_defaults.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "context" + + "knative.dev/pkg/apis" +) + +var _ apis.Defaultable = (*VerificationPolicy)(nil) + +// SetDefaults implements apis.Defaultable +func (v *VerificationPolicy) SetDefaults(ctx context.Context) { + +} diff --git a/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go b/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go new file mode 100644 index 00000000000..8df3774b4bd --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/verificationpolicy_types.go @@ -0,0 +1,117 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "crypto" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// +genclient +// +genclient:noStatus +// +genreconciler:krshapedlogic=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VerificationPolicy defines the rules to verify Tekton resources. +// VerificationPolicy can config the mapping from resources to a list of public +// keys, so when verifying the resources we can use the corresponding public keys. +// +k8s:openapi-gen=true +type VerificationPolicy struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata"` + + // Spec holds the desired state of the VerificationPolicy. + Spec VerificationPolicySpec `json:"spec"` +} + +// VerificationPolicyList contains a list of VerificationPolicy +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VerificationPolicyList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []VerificationPolicy `json:"items"` +} + +// GetGroupVersionKind implements kmeta.OwnerRefable. +func (*VerificationPolicy) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("VerificationPolicy") +} + +// VerificationPolicySpec defines the patterns and authorities. +type VerificationPolicySpec struct { + // Resources defines the patterns of resources sources that should be subject to this policy. + // For example, we may want to apply this Policy from a certain GitHub repo. + // Then the ResourcesPattern should be valid regex. E.g. If using gitresolver, and we want to config keys from a certain git repo. + // `ResourcesPattern` can be `https://github.com/tektoncd/catalog.git`, we will use regex to filter out those resources. + Resources []ResourcePattern `json:"resources"` + // Authorities defines the rules for validating signatures. + Authorities []Authority `json:"authorities"` +} + +// ResourcePattern defines the pattern of the resource source +type ResourcePattern struct { + // Pattern defines a resource pattern. 
Regex is created to filter resources based on `Pattern` + // Example patterns: + // GitHub resource: https://github.com/tektoncd/catalog.git, https://github.com/tektoncd/* + // Bundle resource: gcr.io/tekton-releases/catalog/upstream/git-clone, gcr.io/tekton-releases/catalog/upstream/* + // Hub resource: https://artifacthub.io/*, + Pattern string `json:"pattern"` +} + +// The Authority block defines the keys for validating signatures. +type Authority struct { + // Name is the name for this authority. + Name string `json:"name"` + // Key contains the public key to validate the resource. + Key *KeyRef `json:"key,omitempty"` +} + +// KeyRef defines the reference to a public key +type KeyRef struct { + // SecretRef sets a reference to a secret with the key. + // +optional + SecretRef *v1.SecretReference `json:"secretRef,omitempty"` + // Data contains the inline public key. + // +optional + Data string `json:"data,omitempty"` + // HashAlgorithm always defaults to sha256 if the algorithm hasn't been explicitly set + // +optional + HashAlgorithm HashAlgorithm `json:"hashAlgorithm,omitempty"` +} + +// HashAlgorithm defines the hash algorithm used for the public key +type HashAlgorithm string + +const ( + sha224 HashAlgorithm = "sha224" + sha256 HashAlgorithm = "sha256" + sha384 HashAlgorithm = "sha384" + sha512 HashAlgorithm = "sha512" + empty HashAlgorithm = "" +) + +// SupportedSignatureAlgorithms sets a list of support signature algorithms that is similar to the list supported by cosign. +// empty HashAlgorithm is allowed and will be set to SHA256. +var SupportedSignatureAlgorithms = map[HashAlgorithm]crypto.Hash{ + sha224: crypto.SHA224, + sha256: crypto.SHA256, + sha384: crypto.SHA384, + sha512: crypto.SHA512, + empty: crypto.SHA256, +} diff --git a/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go b/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go new file mode 100644 index 00000000000..002228a3dbd --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation.go @@ -0,0 +1,93 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/tektoncd/pipeline/pkg/apis/validate" + "knative.dev/pkg/apis" +) + +var _ apis.Validatable = (*VerificationPolicy)(nil) + +var ( + // InvalidResourcePatternErr is returned when the pattern is not valid regex expression + InvalidResourcePatternErr = "resourcePattern cannot be compiled by regex" +) + +// Validate VerificationPolicy +func (v *VerificationPolicy) Validate(ctx context.Context) (errs *apis.FieldError) { + errs = errs.Also(validate.ObjectMetadata(v.GetObjectMeta()).ViaField("metadata")) + errs = errs.Also(v.Spec.Validate(ctx)) + return errs +} + +// Validate VerificationPolicySpec, the validation requires Resources is not empty, for each +// resource it must be able to be regex expression and can be compiled with no error. The Authorities +// shouldn't be empty and each Authority should be valid. 
+func (vs *VerificationPolicySpec) Validate(ctx context.Context) (errs *apis.FieldError) { + if len(vs.Resources) == 0 { + errs = errs.Also(apis.ErrMissingField("resources")) + } + for _, r := range vs.Resources { + errs = errs.Also(r.Validate(ctx)) + } + if len(vs.Authorities) == 0 { + errs = errs.Also(apis.ErrMissingField("authorities")) + } + for i, a := range vs.Authorities { + if a.Key != nil { + errs = errs.Also(a.Key.Validate(ctx).ViaFieldIndex("key", i)) + } + } + return errs +} + +// Validate KeyRef will check if one of KeyRef's Data or SecretRef exists, and the +// Supported HashAlgorithm is in supportedSignatureAlgorithms. +func (key *KeyRef) Validate(ctx context.Context) (errs *apis.FieldError) { + if key.Data == "" && key.SecretRef == nil { + errs = errs.Also(apis.ErrMissingOneOf("data", "secretref")) + } + + if key.Data != "" && key.SecretRef != nil { + errs = errs.Also(apis.ErrMultipleOneOf("data", "secretref")) + } + errs = errs.Also(validateHashAlgorithm(key.HashAlgorithm)) + + return errs +} + +// Validate ResourcePattern and make sure the Pattern is valid regex expression +func (r *ResourcePattern) Validate(ctx context.Context) (errs *apis.FieldError) { + if _, err := regexp.Compile(r.Pattern); err != nil { + errs = errs.Also(apis.ErrInvalidValue(r.Pattern, "ResourcePattern", fmt.Sprintf("%v: %v", InvalidResourcePatternErr, err))) + return errs + } + return nil +} + +// validateHashAlgorithm checks if the algorithm is supported +func validateHashAlgorithm(algorithmName HashAlgorithm) (errs *apis.FieldError) { + normalizedAlgo := strings.ToLower(string(algorithmName)) + _, exists := SupportedSignatureAlgorithms[HashAlgorithm(normalizedAlgo)] + if !exists { + return apis.ErrInvalidValue(algorithmName, "HashAlgorithm") + } + return nil +} diff --git a/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation_test.go b/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation_test.go new file mode 100644 index 00000000000..88e71e41dce --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/verificationpolicy_validation_test.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/test/diff" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" +) + +func TestVerificationPolicy_Invalid(t *testing.T) { + tests := []struct { + name string + verificationPolicy *v1alpha1.VerificationPolicy + want *apis.FieldError + }{{ + name: "missing Resources", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inline_key", + }, + }, + }, + }, + }, + want: apis.ErrMissingField("resources"), + }, { + name: "invalid ResourcePattern", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{"^["}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inline_key", + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("^[", "ResourcePattern", fmt.Sprintf("%v: error parsing regexp: missing closing ]: `[`", v1alpha1.InvalidResourcePatternErr)), + }, { + name: "missing Authoritities", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + }, + }, + want: apis.ErrMissingField("authorities"), + }, { + name: "missing Authority key", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{}, + }, + }, + }, + }, + want: apis.ErrMissingOneOf("key[0].data", "key[0].secretref"), + }, { + name: "should not have both data and secretref", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inlinekey", + SecretRef: &corev1.SecretReference{ + Name: "name", + }, + }, + }, + }, + }, + }, + want: apis.ErrMultipleOneOf("key[0].data", "key[0].secretref"), + }, { + name: "invalid hash algorithm", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inlinekey", + HashAlgorithm: "sha1", + }, + }, + }, + }, + }, + want: apis.ErrInvalidValue("sha1", "HashAlgorithm").ViaFieldIndex("key", 0), + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.verificationPolicy.Validate(context.Background()) + if d := cmp.Diff(tt.want.Error(), err.Error()); d != "" { + t.Error("VerificationPolicy validate error mismatch", diff.PrintWantGot(d)) + } + }) + } +} + +func TestVerificationPolicy_Valid(t *testing.T) { + tests := []struct { + name string + verificationPolicy *v1alpha1.VerificationPolicy + }{ + { + name: "key in data", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inlinekey", + HashAlgorithm: "sha256", + }, + }, + }, + }, + }, + }, { + name: "key in secretref", + verificationPolicy: &v1alpha1.VerificationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{".*"}}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + SecretRef: &corev1.SecretReference{ + Name: "name", + }, + HashAlgorithm: "sha256", + }, + }, + }, + }, + }, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.verificationPolicy.Validate(context.Background()) + if err != nil { + t.Errorf("validating valid VerificationPolicy: %v", err) + } + }) + } +} diff --git a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 47123e63dd9..9c0c5254591 100644 --- a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -24,10 +24,32 @@ package v1alpha1 import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authority) DeepCopyInto(out *Authority) { + *out = *in + if in.Key != nil { + in, out := &in.Key, &out.Key + *out = new(KeyRef) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authority. +func (in *Authority) DeepCopy() *Authority { + if in == nil { + return nil + } + out := new(Authority) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) { *out = *in @@ -47,6 +69,43 @@ func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyRef) DeepCopyInto(out *KeyRef) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyRef. +func (in *KeyRef) DeepCopy() *KeyRef { + if in == nil { + return nil + } + out := new(KeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePattern) DeepCopyInto(out *ResourcePattern) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePattern. +func (in *ResourcePattern) DeepCopy() *ResourcePattern { + if in == nil { + return nil + } + out := new(ResourcePattern) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Run) DeepCopyInto(out *Run) { *out = *in @@ -135,7 +194,7 @@ func (in *RunSpec) DeepCopyInto(out *RunSpec) { } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) + *out = new(metav1.Duration) **out = **in } if in.Workspaces != nil { @@ -157,3 +216,91 @@ func (in *RunSpec) DeepCopy() *RunSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationPolicy) DeepCopyInto(out *VerificationPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicy. +func (in *VerificationPolicy) DeepCopy() *VerificationPolicy { + if in == nil { + return nil + } + out := new(VerificationPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerificationPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationPolicyList) DeepCopyInto(out *VerificationPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VerificationPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicyList. +func (in *VerificationPolicyList) DeepCopy() *VerificationPolicyList { + if in == nil { + return nil + } + out := new(VerificationPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VerificationPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VerificationPolicySpec) DeepCopyInto(out *VerificationPolicySpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourcePattern, len(*in)) + copy(*out, *in) + } + if in.Authorities != nil { + in, out := &in.Authorities, &out.Authorities + *out = make([]Authority, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VerificationPolicySpec. 
+func (in *VerificationPolicySpec) DeepCopy() *VerificationPolicySpec { + if in == nil { + return nil + } + out := new(VerificationPolicySpec) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go index 7ef7a75e92b..b6966151af5 100644 --- a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go +++ b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go @@ -32,6 +32,10 @@ func (c *FakeTektonV1alpha1) Runs(namespace string) v1alpha1.RunInterface { return &FakeRuns{c, namespace} } +func (c *FakeTektonV1alpha1) VerificationPolicies(namespace string) v1alpha1.VerificationPolicyInterface { + return &FakeVerificationPolicies{c, namespace} +} + // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. func (c *FakeTektonV1alpha1) RESTClient() rest.Interface { diff --git a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_verificationpolicy.go b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_verificationpolicy.go new file mode 100644 index 00000000000..f03f8661767 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_verificationpolicy.go @@ -0,0 +1,130 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVerificationPolicies implements VerificationPolicyInterface +type FakeVerificationPolicies struct { + Fake *FakeTektonV1alpha1 + ns string +} + +var verificationpoliciesResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "verificationpolicies"} + +var verificationpoliciesKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "VerificationPolicy"} + +// Get takes name of the verificationPolicy, and returns the corresponding verificationPolicy object, and an error if there is any. +func (c *FakeVerificationPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VerificationPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(verificationpoliciesResource, c.ns, name), &v1alpha1.VerificationPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VerificationPolicy), err +} + +// List takes label and field selectors, and returns the list of VerificationPolicies that match those selectors. 
+func (c *FakeVerificationPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VerificationPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(verificationpoliciesResource, verificationpoliciesKind, c.ns, opts), &v1alpha1.VerificationPolicyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.VerificationPolicyList{ListMeta: obj.(*v1alpha1.VerificationPolicyList).ListMeta} + for _, item := range obj.(*v1alpha1.VerificationPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested verificationPolicies. +func (c *FakeVerificationPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(verificationpoliciesResource, c.ns, opts)) + +} + +// Create takes the representation of a verificationPolicy and creates it. Returns the server's representation of the verificationPolicy, and an error, if there is any. +func (c *FakeVerificationPolicies) Create(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.CreateOptions) (result *v1alpha1.VerificationPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(verificationpoliciesResource, c.ns, verificationPolicy), &v1alpha1.VerificationPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VerificationPolicy), err +} + +// Update takes the representation of a verificationPolicy and updates it. Returns the server's representation of the verificationPolicy, and an error, if there is any. +func (c *FakeVerificationPolicies) Update(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (result *v1alpha1.VerificationPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(verificationpoliciesResource, c.ns, verificationPolicy), &v1alpha1.VerificationPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VerificationPolicy), err +} + +// Delete takes name of the verificationPolicy and deletes it. Returns an error if one occurs. +func (c *FakeVerificationPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(verificationpoliciesResource, c.ns, name, opts), &v1alpha1.VerificationPolicy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVerificationPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(verificationpoliciesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.VerificationPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched verificationPolicy. +func (c *FakeVerificationPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VerificationPolicy, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(verificationpoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.VerificationPolicy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.VerificationPolicy), err +} diff --git a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go index 40814697cff..490c227c6db 100644 --- a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. package v1alpha1 type RunExpansion interface{} + +type VerificationPolicyExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go index 28d39482a6f..19d91e935d6 100644 --- a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go +++ b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -29,6 +29,7 @@ import ( type TektonV1alpha1Interface interface { RESTClient() rest.Interface RunsGetter + VerificationPoliciesGetter } // TektonV1alpha1Client is used to interact with features provided by the tekton.dev group. @@ -40,6 +41,10 @@ func (c *TektonV1alpha1Client) Runs(namespace string) RunInterface { return newRuns(c, namespace) } +func (c *TektonV1alpha1Client) VerificationPolicies(namespace string) VerificationPolicyInterface { + return newVerificationPolicies(c, namespace) +} + // NewForConfig creates a new TektonV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/verificationpolicy.go b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/verificationpolicy.go new file mode 100644 index 00000000000..92f534093ff --- /dev/null +++ b/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/verificationpolicy.go @@ -0,0 +1,178 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VerificationPoliciesGetter has a method to return a VerificationPolicyInterface. +// A group's client should implement this interface. +type VerificationPoliciesGetter interface { + VerificationPolicies(namespace string) VerificationPolicyInterface +} + +// VerificationPolicyInterface has methods to work with VerificationPolicy resources. 
+type VerificationPolicyInterface interface { + Create(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.CreateOptions) (*v1alpha1.VerificationPolicy, error) + Update(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*v1alpha1.VerificationPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VerificationPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VerificationPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VerificationPolicy, err error) + VerificationPolicyExpansion +} + +// verificationPolicies implements VerificationPolicyInterface +type verificationPolicies struct { + client rest.Interface + ns string +} + +// newVerificationPolicies returns a VerificationPolicies +func newVerificationPolicies(c *TektonV1alpha1Client, namespace string) *verificationPolicies { + return &verificationPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the verificationPolicy, and returns the corresponding verificationPolicy object, and an error if there is any. +func (c *verificationPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VerificationPolicy, err error) { + result = &v1alpha1.VerificationPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verificationpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VerificationPolicies that match those selectors. +func (c *verificationPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VerificationPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VerificationPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("verificationpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested verificationPolicies. +func (c *verificationPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("verificationpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a verificationPolicy and creates it. Returns the server's representation of the verificationPolicy, and an error, if there is any. +func (c *verificationPolicies) Create(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.CreateOptions) (result *v1alpha1.VerificationPolicy, err error) { + result = &v1alpha1.VerificationPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("verificationpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(verificationPolicy). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a verificationPolicy and updates it. Returns the server's representation of the verificationPolicy, and an error, if there is any. +func (c *verificationPolicies) Update(ctx context.Context, verificationPolicy *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (result *v1alpha1.VerificationPolicy, err error) { + result = &v1alpha1.VerificationPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("verificationpolicies"). + Name(verificationPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(verificationPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the verificationPolicy and deletes it. Returns an error if one occurs. +func (c *verificationPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("verificationpolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *verificationPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("verificationpolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched verificationPolicy. +func (c *verificationPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VerificationPolicy, err error) { + result = &v1alpha1.VerificationPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("verificationpolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 3f3c58d271c..50110a1fd29 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -67,6 +67,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=tekton.dev, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("runs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Runs().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("verificationpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().VerificationPolicies().Informer()}, nil // Group=tekton.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("clustertasks"): diff --git a/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go b/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go index 25a56f81347..c9b783d319a 100644 --- a/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // Runs returns a RunInformer. Runs() RunInformer + // VerificationPolicies returns a VerificationPolicyInformer. 
+ VerificationPolicies() VerificationPolicyInformer } type version struct { @@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) Runs() RunInformer { return &runInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } + +// VerificationPolicies returns a VerificationPolicyInformer. +func (v *version) VerificationPolicies() VerificationPolicyInformer { + return &verificationPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/informers/externalversions/pipeline/v1alpha1/verificationpolicy.go b/pkg/client/informers/externalversions/pipeline/v1alpha1/verificationpolicy.go new file mode 100644 index 00000000000..622a6a030dc --- /dev/null +++ b/pkg/client/informers/externalversions/pipeline/v1alpha1/verificationpolicy.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VerificationPolicyInformer provides access to a shared informer and lister for +// VerificationPolicies. +type VerificationPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VerificationPolicyLister +} + +type verificationPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVerificationPolicyInformer constructs a new informer for VerificationPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVerificationPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVerificationPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVerificationPolicyInformer constructs a new informer for VerificationPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVerificationPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1alpha1().VerificationPolicies(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1alpha1().VerificationPolicies(namespace).Watch(context.TODO(), options) + }, + }, + &pipelinev1alpha1.VerificationPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *verificationPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVerificationPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *verificationPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&pipelinev1alpha1.VerificationPolicy{}, f.defaultInformer) +} + +func (f *verificationPolicyInformer) Lister() v1alpha1.VerificationPolicyLister { + return v1alpha1.NewVerificationPolicyLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/injection/client/client.go b/pkg/client/injection/client/client.go index 3104910cf66..67cd130ad4e 100644 --- a/pkg/client/injection/client/client.go +++ b/pkg/client/injection/client/client.go @@ -246,6 +246,137 @@ func (w *wrapTektonV1alpha1RunImpl) Watch(ctx context.Context, opts v1.ListOptio return nil, errors.New("NYI: Watch") } +func (w *wrapTektonV1alpha1) VerificationPolicies(namespace string) typedtektonv1alpha1.VerificationPolicyInterface { + return &wrapTektonV1alpha1VerificationPolicyImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "tekton.dev", + Version: "v1alpha1", + Resource: "verificationpolicies", + }), + + namespace: namespace, + } +} + +type wrapTektonV1alpha1VerificationPolicyImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedtektonv1alpha1.VerificationPolicyInterface = (*wrapTektonV1alpha1VerificationPolicyImpl)(nil) + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Create(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.CreateOptions) (*v1alpha1.VerificationPolicy, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1alpha1", + Kind: "VerificationPolicy", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicy{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Get(ctx context.Context, 
name string, opts v1.GetOptions) (*v1alpha1.VerificationPolicy, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicy{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VerificationPolicyList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicyList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VerificationPolicy, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicy{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Update(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*v1alpha1.VerificationPolicy, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1alpha1", + Kind: "VerificationPolicy", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicy{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) UpdateStatus(ctx context.Context, in *v1alpha1.VerificationPolicy, opts v1.UpdateOptions) (*v1alpha1.VerificationPolicy, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1alpha1", + Kind: "VerificationPolicy", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1alpha1.VerificationPolicy{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1alpha1VerificationPolicyImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} + // TektonV1beta1 retrieves the TektonV1beta1Client func (w *wrapClient) TektonV1beta1() typedtektonv1beta1.TektonV1beta1Interface { return &wrapTektonV1beta1{ diff --git a/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake/fake.go b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake/fake.go new file mode 100644 index 00000000000..e64a59090bb --- /dev/null +++ b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package fake + +import ( + context "context" + + fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake" + verificationpolicy "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" +) + +var Get = verificationpolicy.Get + +func init() { + injection.Fake.RegisterInformer(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := fake.Get(ctx) + inf := f.Tekton().V1alpha1().VerificationPolicies() + return context.WithValue(ctx, verificationpolicy.Key{}, inf), inf.Informer() +} diff --git a/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/fake/fake.go b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/fake/fake.go new file mode 100644 index 00000000000..029c40bc466 --- /dev/null +++ b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/fake/fake.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fake + +import ( + context "context" + + factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered" + filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterFilteredInformers(withInformer) +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(factoryfiltered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := factoryfiltered.Get(ctx, selector) + inf := f.Tekton().V1alpha1().VerificationPolicies() + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} diff --git a/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/verificationpolicy.go b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/verificationpolicy.go new file mode 100644 index 00000000000..5fc754fcfdf --- /dev/null +++ b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/filtered/verificationpolicy.go @@ -0,0 +1,136 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package filtered + +import ( + context "context" + + apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + cache "k8s.io/client-go/tools/cache" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterFilteredInformers(withInformer) + injection.Dynamic.RegisterDynamicInformer(withDynamicInformer) +} + +// Key is used for associating the Informer inside the context.Context. 
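+// A separate Key value is stored per label selector, so filtered informers for
+// several selectors can coexist in one context (see Get below, which takes the
+// selector as an argument).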
+type Key struct { + Selector string +} + +func withInformer(ctx context.Context) (context.Context, []controller.Informer) { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + infs := []controller.Informer{} + for _, selector := range labelSelectors { + f := filtered.Get(ctx, selector) + inf := f.Tekton().V1alpha1().VerificationPolicies() + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + infs = append(infs, inf.Informer()) + } + return ctx, infs +} + +func withDynamicInformer(ctx context.Context) context.Context { + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + for _, selector := range labelSelectors { + inf := &wrapper{client: client.Get(ctx), selector: selector} + ctx = context.WithValue(ctx, Key{Selector: selector}, inf) + } + return ctx +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context, selector string) v1alpha1.VerificationPolicyInformer { + untyped := ctx.Value(Key{Selector: selector}) + if untyped == nil { + logging.FromContext(ctx).Panicf( + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.VerificationPolicyInformer with selector %s from context.", selector) + } + return untyped.(v1alpha1.VerificationPolicyInformer) +} + +type wrapper struct { + client versioned.Interface + + namespace string + + selector string +} + +var _ v1alpha1.VerificationPolicyInformer = (*wrapper)(nil) +var _ pipelinev1alpha1.VerificationPolicyLister = (*wrapper)(nil) + +func (w *wrapper) Informer() cache.SharedIndexInformer { + return cache.NewSharedIndexInformer(nil, &apispipelinev1alpha1.VerificationPolicy{}, 0, nil) +} + +func (w *wrapper) Lister() pipelinev1alpha1.VerificationPolicyLister { + return w +} + +func (w *wrapper) VerificationPolicies(namespace string) pipelinev1alpha1.VerificationPolicyNamespaceLister { + return &wrapper{client: w.client, namespace: namespace, selector: w.selector} +} + +func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1alpha1.VerificationPolicy, err error) { + reqs, err := labels.ParseToRequirements(w.selector) + if err != nil { + return nil, err + } + selector = selector.Add(reqs...) + lo, err := w.client.TektonV1alpha1().VerificationPolicies(w.namespace).List(context.TODO(), v1.ListOptions{ + LabelSelector: selector.String(), + // TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria. + }) + if err != nil { + return nil, err + } + for idx := range lo.Items { + ret = append(ret, &lo.Items[idx]) + } + return ret, nil +} + +func (w *wrapper) Get(name string) (*apispipelinev1alpha1.VerificationPolicy, error) { + // TODO(mattmoor): Check that the fetched object matches the selector. + return w.client.TektonV1alpha1().VerificationPolicies(w.namespace).Get(context.TODO(), name, v1.GetOptions{ + // TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria. 
+ }) +} diff --git a/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go new file mode 100644 index 00000000000..04480ee8eaa --- /dev/null +++ b/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/verificationpolicy.go @@ -0,0 +1,116 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package verificationpolicy + +import ( + context "context" + + apispipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + cache "k8s.io/client-go/tools/cache" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) + injection.Dynamic.RegisterDynamicInformer(withDynamicInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Tekton().V1alpha1().VerificationPolicies() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +func withDynamicInformer(ctx context.Context) context.Context { + inf := &wrapper{client: client.Get(ctx), resourceVersion: injection.GetResourceVersion(ctx)} + return context.WithValue(ctx, Key{}, inf) +} + +// Get extracts the typed informer from the context. 
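+//
+// A minimal usage sketch (hypothetical caller; variable names are illustrative,
+// not part of the generated API) for a controller wired through knative injection:
+//
+//	vpInformer := verificationpolicy.Get(ctx)
+//	policies, err := vpInformer.Lister().VerificationPolicies("default").List(labels.Everything())
+//
+// where "default" stands in for the namespace being reconciled.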
+func Get(ctx context.Context) v1alpha1.VerificationPolicyInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1.VerificationPolicyInformer from context.") + } + return untyped.(v1alpha1.VerificationPolicyInformer) +} + +type wrapper struct { + client versioned.Interface + + namespace string + + resourceVersion string +} + +var _ v1alpha1.VerificationPolicyInformer = (*wrapper)(nil) +var _ pipelinev1alpha1.VerificationPolicyLister = (*wrapper)(nil) + +func (w *wrapper) Informer() cache.SharedIndexInformer { + return cache.NewSharedIndexInformer(nil, &apispipelinev1alpha1.VerificationPolicy{}, 0, nil) +} + +func (w *wrapper) Lister() pipelinev1alpha1.VerificationPolicyLister { + return w +} + +func (w *wrapper) VerificationPolicies(namespace string) pipelinev1alpha1.VerificationPolicyNamespaceLister { + return &wrapper{client: w.client, namespace: namespace, resourceVersion: w.resourceVersion} +} + +// SetResourceVersion allows consumers to adjust the minimum resourceVersion +// used by the underlying client. It is not accessible via the standard +// lister interface, but can be accessed through a user-defined interface and +// an implementation check e.g. rvs, ok := foo.(ResourceVersionSetter) +func (w *wrapper) SetResourceVersion(resourceVersion string) { + w.resourceVersion = resourceVersion +} + +func (w *wrapper) List(selector labels.Selector) (ret []*apispipelinev1alpha1.VerificationPolicy, err error) { + lo, err := w.client.TektonV1alpha1().VerificationPolicies(w.namespace).List(context.TODO(), v1.ListOptions{ + LabelSelector: selector.String(), + ResourceVersion: w.resourceVersion, + }) + if err != nil { + return nil, err + } + for idx := range lo.Items { + ret = append(ret, &lo.Items[idx]) + } + return ret, nil +} + +func (w *wrapper) Get(name string) (*apispipelinev1alpha1.VerificationPolicy, error) { + return w.client.TektonV1alpha1().VerificationPolicies(w.namespace).Get(context.TODO(), name, v1.GetOptions{ + ResourceVersion: w.resourceVersion, + }) +} diff --git a/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/controller.go b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/controller.go new file mode 100644 index 00000000000..914b77b1d1b --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/controller.go @@ -0,0 +1,159 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package verificationpolicy + +import ( + context "context" + fmt "fmt" + reflect "reflect" + strings "strings" + + versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + client "github.com/tektoncd/pipeline/pkg/client/injection/client" + verificationpolicy "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy" + zap "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/kubernetes/typed/core/v1" + record "k8s.io/client-go/tools/record" + kubeclient "knative.dev/pkg/client/injection/kube/client" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + logkey "knative.dev/pkg/logging/logkey" + reconciler "knative.dev/pkg/reconciler" +) + +const ( + defaultControllerAgentName = "verificationpolicy-controller" + defaultFinalizerName = "verificationpolicies.tekton.dev" +) + +// NewImpl returns a controller.Impl that handles queuing and feeding work from +// the queue through an implementation of controller.Reconciler, delegating to +// the provided Interface and optional Finalizer methods. OptionsFn is used to return +// controller.ControllerOptions to be used by the internal reconciler. +func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { + logger := logging.FromContext(ctx) + + // Check the options function input. It should be 0 or 1. + if len(optionsFns) > 1 { + logger.Fatal("Up to one options function is supported, found: ", len(optionsFns)) + } + + verificationpolicyInformer := verificationpolicy.Get(ctx) + + lister := verificationpolicyInformer.Lister() + + var promoteFilterFunc func(obj interface{}) bool + + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + if promoteFilterFunc != nil { + if ok := promoteFilterFunc(elt); !ok { + continue + } + } + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client.Get(ctx), + Lister: lister, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + ctrType := reflect.TypeOf(r).Elem() + ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name()) + ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".") + + logger = logger.With( + zap.String(logkey.ControllerType, ctrTypeName), + zap.String(logkey.Kind, "tekton.dev.VerificationPolicy"), + ) + + impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger}) + agentName := defaultControllerAgentName + + // Pass impl to the options. Save any optional results. 
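+	// An options function is expected to look roughly like the following sketch
+	// (the field value is illustrative, not a default):
+	//
+	//	func(impl *controller.Impl) controller.Options {
+	//		return controller.Options{FinalizerName: "my-finalizer.example.dev"}
+	//	}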
+ for _, fn := range optionsFns { + opts := fn(impl) + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.AgentName != "" { + agentName = opts.AgentName + } + if opts.DemoteFunc != nil { + rec.DemoteFunc = opts.DemoteFunc + } + if opts.PromoteFilterFunc != nil { + promoteFilterFunc = opts.PromoteFilterFunc + } + } + + rec.Recorder = createRecorder(ctx, agentName) + + return impl +} + +func createRecorder(ctx context.Context, agentName string) record.EventRecorder { + logger := logging.FromContext(ctx) + + recorder := controller.GetEventRecorder(ctx) + if recorder == nil { + // Create event broadcaster + logger.Debug("Creating event broadcaster") + eventBroadcaster := record.NewBroadcaster() + watches := []watch.Interface{ + eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), + eventBroadcaster.StartRecordingToSink( + &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), + } + recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) + go func() { + <-ctx.Done() + for _, w := range watches { + w.Stop() + } + }() + } + + return recorder +} + +func init() { + versionedscheme.AddToScheme(scheme.Scheme) +} diff --git a/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/reconciler.go b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/reconciler.go new file mode 100644 index 00000000000..b99de2e65cc --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/reconciler.go @@ -0,0 +1,365 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package verificationpolicy + +import ( + context "context" + json "encoding/json" + fmt "fmt" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + zap "go.uber.org/zap" + v1 "k8s.io/api/core/v1" + errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + sets "k8s.io/apimachinery/pkg/util/sets" + record "k8s.io/client-go/tools/record" + controller "knative.dev/pkg/controller" + logging "knative.dev/pkg/logging" + reconciler "knative.dev/pkg/reconciler" +) + +// Interface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1alpha1.VerificationPolicy. +type Interface interface { + // ReconcileKind implements custom logic to reconcile v1alpha1.VerificationPolicy. Any changes + // to the objects .Status or .Finalizers will be propagated to the stored + // object. 
It is recommended that implementors do not call any update calls + // for the Kind inside of ReconcileKind, it is the responsibility of the calling + // controller to propagate those properties. The resource passed to ReconcileKind + // will always have an empty deletion timestamp. + ReconcileKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event +} + +// Finalizer defines the strongly typed interfaces to be implemented by a +// controller finalizing v1alpha1.VerificationPolicy. +type Finalizer interface { + // FinalizeKind implements custom logic to finalize v1alpha1.VerificationPolicy. Any changes + // to the objects .Status or .Finalizers will be ignored. Returning a nil or + // Normal type reconciler.Event will allow the finalizer to be deleted on + // the resource. The resource passed to FinalizeKind will always have a set + // deletion timestamp. + FinalizeKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event +} + +// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a +// controller reconciling v1alpha1.VerificationPolicy if they want to process resources for which +// they are not the leader. +type ReadOnlyInterface interface { + // ObserveKind implements logic to observe v1alpha1.VerificationPolicy. + // This method should not write to the API. + ObserveKind(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event +} + +type doReconcile func(ctx context.Context, o *v1alpha1.VerificationPolicy) reconciler.Event + +// reconcilerImpl implements controller.Reconciler for v1alpha1.VerificationPolicy resources. +type reconcilerImpl struct { + // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. + reconciler.LeaderAwareFuncs + + // Client is used to write back status updates. + Client versioned.Interface + + // Listers index properties about resources. + Lister pipelinev1alpha1.VerificationPolicyLister + + // Recorder is an event recorder for recording Event resources to the + // Kubernetes API. + Recorder record.EventRecorder + + // configStore allows for decorating a context with config maps. + // +optional + configStore reconciler.ConfigStore + + // reconciler is the implementation of the business logic of the resource. + reconciler Interface + + // finalizerName is the name of the finalizer to reconcile. + finalizerName string +} + +// Check that our Reconciler implements controller.Reconciler. +var _ controller.Reconciler = (*reconcilerImpl)(nil) + +// Check that our generated Reconciler is always LeaderAware. +var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) + +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1alpha1.VerificationPolicyLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { + // Check the options function input. It should be 0 or 1. + if len(options) > 1 { + logger.Fatal("Up to one options struct is supported, found: ", len(options)) + } + + // Fail fast when users inadvertently implement the other LeaderAware interface. + // For the typed reconcilers, Promote shouldn't take any arguments. + if _, ok := r.(reconciler.LeaderAware); ok { + logger.Fatalf("%T implements the incorrect LeaderAware interface. 
Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) + } + + rec := &reconcilerImpl{ + LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ + PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { + all, err := lister.List(labels.Everything()) + if err != nil { + return err + } + for _, elt := range all { + // TODO: Consider letting users specify a filter in options. + enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client, + Lister: lister, + Recorder: recorder, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + for _, opts := range options { + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.DemoteFunc != nil { + rec.DemoteFunc = opts.DemoteFunc + } + } + + return rec +} + +// Reconcile implements controller.Reconciler +func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + // Initialize the reconciler state. This will convert the namespace/name + // string into a distinct namespace and name, determine if this instance of + // the reconciler is the leader, and any additional interfaces implemented + // by the reconciler. Returns an error is the resource key is invalid. + s, err := newState(key, r) + if err != nil { + logger.Error("Invalid resource key: ", key) + return nil + } + + // If we are not the leader, and we don't implement either ReadOnly + // observer interfaces, then take a fast-path out. + if s.isNotLeaderNorObserver() { + return controller.NewSkipKey(key) + } + + // If configStore is set, attach the frozen configuration to the context. + if r.configStore != nil { + ctx = r.configStore.ToContext(ctx) + } + + // Add the recorder to context. + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + // Get the resource with this namespace/name. + + getter := r.Lister.VerificationPolicies(s.namespace) + + original, err := getter.Get(s.name) + + if errors.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. + logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + resource := original.DeepCopy() + + var reconcileEvent reconciler.Event + + name, do := s.reconcileMethodFor(resource) + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", name)) + switch name { + case reconciler.DoReconcileKind: + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } + + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = do(ctx, resource) + + case reconciler.DoFinalizeKind: + // For finalizing reconcilers, if this resource being marked for deletion + // and reconciled cleanly (nil or normal event), remove the finalizer. 
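+		// clearFinalizer (below) synchronizes finalizers with a JSON merge patch via
+		// updateFinalizersFiltered; the finalizer is only dropped when the event
+		// returned by FinalizeKind is nil or of type Normal.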
+ reconcileEvent = do(ctx, resource) + + if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { + return fmt.Errorf("failed to clear finalizers: %w", err) + } + + case reconciler.DoObserveKind: + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = do(ctx, resource) + + } + + // Report the reconciler event, if any. + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) + + // the event was wrapped inside an error, consider the reconciliation as failed + if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { + return reconcileEvent + } + return nil + } + + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. + } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } + return reconcileEvent + } + + return nil +} + +// updateFinalizersFiltered will update the Finalizers of the resource. +// TODO: this method could be generic and sync all finalizers. For now it only +// updates defaultFinalizerName or its override. +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.VerificationPolicy, desiredFinalizers sets.String) (*v1alpha1.VerificationPolicy, error) { + // Don't modify the informers copy. + existing := resource.DeepCopy() + + var finalizers []string + + // If there's nothing to update, just return. + existingFinalizers := sets.NewString(existing.Finalizers...) + + if desiredFinalizers.Has(r.finalizerName) { + if existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Add the finalizer. + finalizers = append(existing.Finalizers, r.finalizerName) + } else { + if !existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Remove the finalizer. + existingFinalizers.Delete(r.finalizerName) + finalizers = existingFinalizers.List() + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": finalizers, + "resourceVersion": existing.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return resource, err + } + + patcher := r.Client.TektonV1alpha1().VerificationPolicies(resource.Namespace) + + resourceName := resource.Name + updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", + "Failed to update finalizers for %q: %v", resourceName, err) + } else { + r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", + "Updated %q finalizers", resource.GetName()) + } + return updated, err +} + +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.VerificationPolicy) (*v1alpha1.VerificationPolicy, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + // If this resource is not being deleted, mark the finalizer. 
+ if resource.GetDeletionTimestamp().IsZero() { + finalizers.Insert(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource, finalizers) +} + +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.VerificationPolicy, reconcileEvent reconciler.Event) (*v1alpha1.VerificationPolicy, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + if resource.GetDeletionTimestamp().IsZero() { + return resource, nil + } + + finalizers := sets.NewString(resource.Finalizers...) + + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + if event.EventType == v1.EventTypeNormal { + finalizers.Delete(r.finalizerName) + } + } + } else { + finalizers.Delete(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource, finalizers) +} diff --git a/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/state.go b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/state.go new file mode 100644 index 00000000000..293cb5a08dc --- /dev/null +++ b/pkg/client/injection/reconciler/pipeline/v1alpha1/verificationpolicy/state.go @@ -0,0 +1,97 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package verificationpolicy + +import ( + fmt "fmt" + + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + types "k8s.io/apimachinery/pkg/types" + cache "k8s.io/client-go/tools/cache" + reconciler "knative.dev/pkg/reconciler" +) + +// state is used to track the state of a reconciler in a single run. +type state struct { + // key is the original reconciliation key from the queue. + key string + // namespace is the namespace split from the reconciliation key. + namespace string + // name is the name split from the reconciliation key. + name string + // reconciler is the reconciler. + reconciler Interface + // roi is the read only interface cast of the reconciler. + roi ReadOnlyInterface + // isROI (Read Only Interface) the reconciler only observes reconciliation. + isROI bool + // isLeader the instance of the reconciler is the elected leader. + isLeader bool +} + +func newState(key string, r *reconcilerImpl) (*state, error) { + // Convert the namespace/name string into a distinct namespace and name. 
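+	// For example, a workqueue key such as "default/sample-policy" (values
+	// illustrative) splits into namespace "default" and name "sample-policy".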
+ namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return nil, fmt.Errorf("invalid resource key: %s", key) + } + + roi, isROI := r.reconciler.(ReadOnlyInterface) + + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + + return &state{ + key: key, + namespace: namespace, + name: name, + reconciler: r.reconciler, + roi: roi, + isROI: isROI, + isLeader: isLeader, + }, nil +} + +// isNotLeaderNorObserver checks to see if this reconciler with the current +// state is enabled to do any work or not. +// isNotLeaderNorObserver returns true when there is no work possible for the +// reconciler. +func (s *state) isNotLeaderNorObserver() bool { + if !s.isLeader && !s.isROI { + // If we are not the leader, and we don't implement the ReadOnly + // interface, then take a fast-path out. + return true + } + return false +} + +func (s *state) reconcileMethodFor(o *v1alpha1.VerificationPolicy) (string, doReconcile) { + if o.GetDeletionTimestamp().IsZero() { + if s.isLeader { + return reconciler.DoReconcileKind, s.reconciler.ReconcileKind + } else if s.isROI { + return reconciler.DoObserveKind, s.roi.ObserveKind + } + } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok { + return reconciler.DoFinalizeKind, fin.FinalizeKind + } + return "unknown", nil +} diff --git a/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go b/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go index ef742ea3490..459ebc6ce55 100644 --- a/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go @@ -25,3 +25,11 @@ type RunListerExpansion interface{} // RunNamespaceListerExpansion allows custom methods to be added to // RunNamespaceLister. type RunNamespaceListerExpansion interface{} + +// VerificationPolicyListerExpansion allows custom methods to be added to +// VerificationPolicyLister. +type VerificationPolicyListerExpansion interface{} + +// VerificationPolicyNamespaceListerExpansion allows custom methods to be added to +// VerificationPolicyNamespaceLister. +type VerificationPolicyNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/pipeline/v1alpha1/verificationpolicy.go b/pkg/client/listers/pipeline/v1alpha1/verificationpolicy.go new file mode 100644 index 00000000000..7682fa51ba2 --- /dev/null +++ b/pkg/client/listers/pipeline/v1alpha1/verificationpolicy.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VerificationPolicyLister helps list VerificationPolicies. +// All objects returned here must be treated as read-only. +type VerificationPolicyLister interface { + // List lists all VerificationPolicies in the indexer. 
+ // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.VerificationPolicy, err error) + // VerificationPolicies returns an object that can list and get VerificationPolicies. + VerificationPolicies(namespace string) VerificationPolicyNamespaceLister + VerificationPolicyListerExpansion +} + +// verificationPolicyLister implements the VerificationPolicyLister interface. +type verificationPolicyLister struct { + indexer cache.Indexer +} + +// NewVerificationPolicyLister returns a new VerificationPolicyLister. +func NewVerificationPolicyLister(indexer cache.Indexer) VerificationPolicyLister { + return &verificationPolicyLister{indexer: indexer} +} + +// List lists all VerificationPolicies in the indexer. +func (s *verificationPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.VerificationPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VerificationPolicy)) + }) + return ret, err +} + +// VerificationPolicies returns an object that can list and get VerificationPolicies. +func (s *verificationPolicyLister) VerificationPolicies(namespace string) VerificationPolicyNamespaceLister { + return verificationPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VerificationPolicyNamespaceLister helps list and get VerificationPolicies. +// All objects returned here must be treated as read-only. +type VerificationPolicyNamespaceLister interface { + // List lists all VerificationPolicies in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.VerificationPolicy, err error) + // Get retrieves the VerificationPolicy from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.VerificationPolicy, error) + VerificationPolicyNamespaceListerExpansion +} + +// verificationPolicyNamespaceLister implements the VerificationPolicyNamespaceLister +// interface. +type verificationPolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VerificationPolicies in the indexer for a given namespace. +func (s verificationPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.VerificationPolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VerificationPolicy)) + }) + return ret, err +} + +// Get retrieves the VerificationPolicy from the indexer for a given namespace and name. 
+func (s verificationPolicyNamespaceLister) Get(name string) (*v1alpha1.VerificationPolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("verificationpolicy"), name) + } + return obj.(*v1alpha1.VerificationPolicy), nil +} diff --git a/pkg/reconciler/pipelinerun/controller.go b/pkg/reconciler/pipelinerun/controller.go index 4bf187fb749..df3e5b65ac6 100644 --- a/pkg/reconciler/pipelinerun/controller.go +++ b/pkg/reconciler/pipelinerun/controller.go @@ -24,6 +24,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client" runinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run" + verificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy" customruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun" pipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun" taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" @@ -55,23 +56,25 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex resourceInformer := resourceinformer.Get(ctx) resolutionInformer := resolutioninformer.Get(ctx) runInformer := runinformer.Get(ctx) + verificationpolicyInformer := verificationpolicyinformer.Get(ctx) configStore := config.NewStore(logger.Named("config-store"), pipelinerunmetrics.MetricsOnStore(logger)) configStore.WatchConfigs(cmw) c := &Reconciler{ - KubeClientSet: kubeclientset, - PipelineClientSet: pipelineclientset, - Images: opts.Images, - Clock: clock, - pipelineRunLister: pipelineRunInformer.Lister(), - taskRunLister: taskRunInformer.Lister(), - customRunLister: customRunInformer.Lister(), - runLister: runInformer.Lister(), - resourceLister: resourceInformer.Lister(), - cloudEventClient: cloudeventclient.Get(ctx), - metrics: pipelinerunmetrics.Get(ctx), - pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), - resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()), + KubeClientSet: kubeclientset, + PipelineClientSet: pipelineclientset, + Images: opts.Images, + Clock: clock, + pipelineRunLister: pipelineRunInformer.Lister(), + taskRunLister: taskRunInformer.Lister(), + customRunLister: customRunInformer.Lister(), + runLister: runInformer.Lister(), + resourceLister: resourceInformer.Lister(), + verificationPolicyLister: verificationpolicyInformer.Lister(), + cloudEventClient: cloudeventclient.Get(ctx), + metrics: pipelinerunmetrics.Get(ctx), + pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), + resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()), } impl := pipelinerunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { return controller.Options{ diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index b31e6afdd6f..343a6242366 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -36,7 +36,7 @@ import ( "github.com/tektoncd/pipeline/pkg/artifacts" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" pipelinerunreconciler 
"github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun" - runlisters "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" + alpha1listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" resourcelisters "github.com/tektoncd/pipeline/pkg/client/resource/listers/resource/v1alpha1" resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution" @@ -59,6 +59,7 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" k8slabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -139,15 +140,16 @@ type Reconciler struct { Clock clock.PassiveClock // listers index properties about resources - pipelineRunLister listers.PipelineRunLister - taskRunLister listers.TaskRunLister - customRunLister listers.CustomRunLister - runLister runlisters.RunLister - resourceLister resourcelisters.PipelineResourceLister - cloudEventClient cloudevent.CEClient - metrics *pipelinerunmetrics.Recorder - pvcHandler volumeclaim.PvcHandler - resolutionRequester resolution.Requester + pipelineRunLister listers.PipelineRunLister + taskRunLister listers.TaskRunLister + customRunLister listers.CustomRunLister + runLister alpha1listers.RunLister + resourceLister resourcelisters.PipelineResourceLister + verificationPolicyLister alpha1listers.VerificationPolicyLister + cloudEventClient cloudevent.CEClient + metrics *pipelinerunmetrics.Recorder + pvcHandler volumeclaim.PvcHandler + resolutionRequester resolution.Requester } var ( @@ -185,7 +187,11 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) before = pr.Status.GetCondition(apis.ConditionSucceeded) } - getPipelineFunc := resources.GetPipelineFunc(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, pr) + vp, err := getVerificationPolicies(ctx, c.verificationPolicyLister, pr.Namespace) + if err != nil { + return fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %v", pr.Namespace, err) + } + getPipelineFunc := resources.GetVerifiedPipelineFunc(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, pr, vp) if pr.IsDone() { pr.SetDefaults(ctx) @@ -221,7 +227,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) } // Make sure that the PipelineRun status is in sync with the actual TaskRuns - err := c.updatePipelineRunStatusFromInformer(ctx, pr) + err = c.updatePipelineRunStatusFromInformer(ctx, pr) if err != nil { // This should not fail. Return the error so we can re-try later. logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error()) @@ -311,7 +317,13 @@ func (c *Reconciler) resolvePipelineState( // We need the TaskRun name to ensure that we don't perform an additional remote resolution request for a PipelineTask // in the TaskRun reconciler. 
trName := resources.GetTaskRunName(pr.Status.TaskRuns, pr.Status.ChildReferences, task.Name, pr.Name) - fn := tresources.GetTaskFunc(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, pr, task.TaskRef, trName, pr.Namespace, pr.Spec.ServiceAccountName) + + vp, err := getVerificationPolicies(ctx, c.verificationPolicyLister, pr.Namespace) + if err != nil { + return nil, fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %v", pr.Namespace, err) + } + + fn := tresources.GetVerifiedTaskFunc(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, pr, task.TaskRef, trName, pr.Namespace, pr.Spec.ServiceAccountName, vp) getRunObjectFunc := func(name string) (v1beta1.RunObject, error) { r, err := c.customRunLister.CustomRuns(pr.Namespace).Get(name) @@ -1624,3 +1636,16 @@ func updatePipelineRunStatusFromChildRefs(logger *zap.SugaredLogger, pr *v1beta1 } pr.Status.ChildReferences = newChildRefs } + +// getVerificationPolicies lists the verificationPolicies from given namespace +func getVerificationPolicies(ctx context.Context, verificationPolicyLister alpha1listers.VerificationPolicyLister, namespace string) ([]*v1alpha1.VerificationPolicy, error) { + var verificationpolicies []*v1alpha1.VerificationPolicy + if config.CheckEnforceResourceVerificationMode(ctx) || config.CheckWarnResourceVerificationMode(ctx) { + var err error + verificationpolicies, err = verificationPolicyLister.VerificationPolicies(namespace).List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("failed to list VerificationPolicies: %w", err) + } + } + return verificationpolicies, nil +} diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index dbba88ff5f5..9e0b3561103 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -11055,10 +11055,7 @@ spec: value: bar `) - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, _, vps := test.SetupMatchAllVerificationPolicies(t, prs.Namespace) signedTask, err := test.GetSignedTask(ts, signer, "test-task") if err != nil { t.Fatal("fail to sign task", err) @@ -11075,20 +11072,15 @@ spec: "resource-verification-mode": "enforce", }, }, - { - ObjectMeta: metav1.ObjectMeta{Name: config.GetTrustedResourcesConfigName(), Namespace: system.Namespace()}, - Data: map[string]string{ - config.PublicKeys: secretpath, - }, - }, } t.Logf("config maps: %s", cms) d := test.Data{ - PipelineRuns: []*v1beta1.PipelineRun{prs}, - Pipelines: []*v1beta1.Pipeline{signedPipeline}, - Tasks: []*v1beta1.Task{signedTask}, - ConfigMaps: cms, + PipelineRuns: []*v1beta1.PipelineRun{prs}, + Pipelines: []*v1beta1.Pipeline{signedPipeline}, + Tasks: []*v1beta1.Task{signedTask}, + ConfigMaps: cms, + VerificationPolicies: vps, } prt := newPipelineRunTest(d, t) defer prt.Cancel() @@ -11138,10 +11130,7 @@ spec: value: bar `) - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, _, vps := test.SetupMatchAllVerificationPolicies(t, prs.Namespace) signedTask, err := test.GetSignedTask(ts, signer, "test-task") if err != nil { t.Fatal("fail to sign task", err) @@ -11170,12 +11159,6 @@ spec: "resource-verification-mode": "enforce", }, }, - { - ObjectMeta: metav1.ObjectMeta{Name: config.GetTrustedResourcesConfigName(), Namespace: system.Namespace()}, - Data: map[string]string{ - config.PublicKeys: secretpath, - }, - }, } t.Logf("config maps: %s", cms) @@ 
-11213,10 +11196,11 @@ spec: for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { d := test.Data{ - PipelineRuns: tc.pipelinerun, - Pipelines: tc.pipeline, - Tasks: tc.task, - ConfigMaps: cms, + PipelineRuns: tc.pipelinerun, + Pipelines: tc.pipeline, + Tasks: tc.task, + ConfigMaps: cms, + VerificationPolicies: vps, } prt := newPipelineRunTest(d, t) defer prt.Cancel() diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref.go b/pkg/reconciler/pipelinerun/resources/pipelineref.go index 72287a1d6c2..755428bae99 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelineref.go +++ b/pkg/reconciler/pipelinerun/resources/pipelineref.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-containerregistry/pkg/authn/k8schain" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" rprp "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinespec" @@ -76,7 +77,7 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien return nil, nil, fmt.Errorf("failed to get keychain: %w", err) } resolver := oci.NewResolver(pr.Bundle, kc) - return resolvePipeline(ctx, resolver, name, k8s) + return resolvePipeline(ctx, resolver, name) } case pr != nil && pr.Resolver != "" && requester != nil: return func(ctx context.Context, name string) (v1beta1.PipelineObject, *v1beta1.ConfigSource, error) { @@ -86,24 +87,54 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien } replacedParams := replaceParamValues(pr.Params, stringReplacements, arrayReplacements, objectReplacements) resolver := resolution.NewResolver(requester, pipelineRun, string(pr.Resolver), "", "", replacedParams) - return resolvePipeline(ctx, resolver, name, k8s) + return resolvePipeline(ctx, resolver, name) } default: // Even if there is no task ref, we should try to return a local resolver. 
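With verification lifted out of the resolvers and into the GetVerified* wrappers, the local resolver constructed just below keeps only what it needs to fetch from the cluster. A hypothetical direct use of the slimmed-down type (the helper name, namespace, and pipeline name are placeholders):

// Hypothetical usage sketch, not part of the patch.
func getLocalPipeline(ctx context.Context, tekton clientset.Interface, namespace, name string) (v1beta1.PipelineObject, error) {
	lc := &LocalPipelineRefResolver{Namespace: namespace, Tektonclient: tekton}
	p, source, err := lc.GetPipeline(ctx, name)
	_ = source // nil for in-cluster pipelines, as the updated tests assert
	return p, err
}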
local := &LocalPipelineRefResolver{ Namespace: namespace, Tektonclient: tekton, - K8sclient: k8s, } return local.GetPipeline } } +// GetVerifiedPipelineFunc is a wrapper of GetPipelineFunc and return the function to +// verify the pipeline if resource-verification-mode is not "skip" +func GetVerifiedPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, pipelineRun *v1beta1.PipelineRun, verificationpolicies []*v1alpha1.VerificationPolicy) rprp.GetPipeline { + get := GetPipelineFunc(ctx, k8s, tekton, requester, pipelineRun) + return func(context.Context, string) (v1beta1.PipelineObject, *v1beta1.ConfigSource, error) { + p, s, err := get(ctx, pipelineRun.Spec.PipelineRef.Name) + if err != nil { + return nil, nil, fmt.Errorf("failed to get pipeline: %w", err) + } + // if the pipeline is in status, then it has been verified and no need to verify again + if pipelineRun.Status.PipelineSpec != nil { + return p, s, nil + } + var source string + if s != nil { + source = s.URI + } + logger := logging.FromContext(ctx) + if config.CheckEnforceResourceVerificationMode(ctx) || config.CheckWarnResourceVerificationMode(ctx) { + if err := trustedresources.VerifyPipeline(ctx, p, k8s, source, verificationpolicies); err != nil { + if config.CheckEnforceResourceVerificationMode(ctx) { + logger.Errorf("GetVerifiedPipelineFunc failed: %v", err) + return nil, nil, fmt.Errorf("GetVerifiedPipelineFunc failed: %w: %v", trustedresources.ErrorResourceVerificationFailed, err) + } + logger.Warnf("GetVerifiedPipelineFunc failed: %v", err) + return p, s, nil + } + } + return p, s, nil + } +} + // LocalPipelineRefResolver uses the current cluster to resolve a pipeline reference. type LocalPipelineRefResolver struct { Namespace string Tektonclient clientset.Interface - K8sclient kubernetes.Interface } // GetPipeline will resolve a Pipeline from the local cluster using a versioned Tekton client. It will @@ -120,9 +151,6 @@ func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) if err != nil { return nil, nil, err } - if err := verifyResolvedPipeline(ctx, pipeline, l.K8sclient); err != nil { - return nil, nil, err - } return pipeline, nil, nil } @@ -130,8 +158,8 @@ func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) // fetch a pipeline with given name. An error is returned if the // resolution doesn't work or the returned data isn't a valid // v1beta1.PipelineObject. 
-func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string, k8s kubernetes.Interface) (v1beta1.PipelineObject, *v1beta1.ConfigSource, error) { - obj, source, err := resolver.Get(ctx, "pipeline", name) +func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string) (v1beta1.PipelineObject, *v1beta1.ConfigSource, error) { + obj, configSource, err := resolver.Get(ctx, "pipeline", name) if err != nil { return nil, nil, err } @@ -139,11 +167,7 @@ func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string, if err != nil { return nil, nil, fmt.Errorf("failed to convert obj %s into Pipeline", obj.GetObjectKind().GroupVersionKind().String()) } - // TODO(#5527): Consider move this function call to GetPipelineData - if err := verifyResolvedPipeline(ctx, pipelineObj, k8s); err != nil { - return nil, nil, err - } - return pipelineObj, source, nil + return pipelineObj, configSource, nil } // readRuntimeObjectAsPipeline tries to convert a generic runtime.Object @@ -158,19 +182,3 @@ func readRuntimeObjectAsPipeline(ctx context.Context, obj runtime.Object) (v1bet return nil, errors.New("resource is not a pipeline") } - -// verifyResolvedPipeline verifies the resolved pipeline -func verifyResolvedPipeline(ctx context.Context, pipeline v1beta1.PipelineObject, k8s kubernetes.Interface) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode || cfg.FeatureFlags.ResourceVerificationMode == config.WarnResourceVerificationMode { - if err := trustedresources.VerifyPipeline(ctx, pipeline, k8s); err != nil { - if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode { - return trustedresources.ErrorResourceVerificationFailed - } - logger := logging.FromContext(ctx) - logger.Warnf("trusted resources verification failed: %v", err) - return nil - } - } - return nil -} diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go index 0fd9443336f..1b3b900032d 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelineref_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelineref_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http/httptest" "net/url" "strings" @@ -40,6 +41,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fakek8s "k8s.io/client-go/kubernetes/fake" + "knative.dev/pkg/logging" logtesting "knative.dev/pkg/logging/testing" ) @@ -440,200 +442,161 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) { } } -func TestLocalPipelineRef_TrustedResourceVerification_Success(t *testing.T) { +func TestGetVerifiedPipelineFunc_Success(t *testing.T) { ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + tektonclient := fake.NewSimpleClientset() + signer, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") - signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "test-signed") + unsignedPipelineBytes, err := json.Marshal(unsignedPipeline) if err != nil { - t.Fatal("fail to sign pipeline", err) + t.Fatal("fail to marshal pipeline", err) } - // attack another signed pipeline - signedPipeline2, err := 
test.GetSignedPipeline(test.GetUnsignedPipeline("test-pipeline2"), signer, "test-signed2") + resolvedUnsigned := test.NewResolvedResource(unsignedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) + requesterUnsigned := test.NewRequester(resolvedUnsigned, nil) + + signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "signed") if err != nil { - t.Fatal("fail to sign task", err) - } - tamperedPipeline := signedPipeline2.DeepCopy() - if tamperedPipeline.Annotations == nil { - tamperedPipeline.Annotations = make(map[string]string) + t.Fatal("fail to sign pipeline", err) } - tamperedPipeline.Annotations["random"] = "attack" - - tektonclient := fake.NewSimpleClientset(signedPipeline, unsignedPipeline, tamperedPipeline) - - testcases := []struct { - name string - ref *v1beta1.PipelineRef - resourceVerificationMode string - expected runtime.Object - }{ - { - name: "local signed pipeline with enforce policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: signedPipeline, - }, { - name: "local unsigned pipeline with warn policy", - ref: &v1beta1.PipelineRef{ - Name: "test-pipeline", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: unsignedPipeline, - }, - { - name: "local signed pipeline with warn policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: signedPipeline, - }, { - name: "local tampered pipeline with warn policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed2", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: tamperedPipeline, - }, { - name: "local unsigned pipeline with skip policy", - ref: &v1beta1.PipelineRef{ - Name: "test-pipeline", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: unsignedPipeline, - }, - { - name: "local signed pipeline with skip policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: signedPipeline, - }, { - name: "local tampered pipeline with skip policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed2", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: tamperedPipeline, - }, + signedPipelineBytes, err := json.Marshal(signedPipeline) + if err != nil { + t.Fatal("fail to marshal pipeline", err) } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) - lc := &resources.LocalPipelineRefResolver{ - Namespace: "trusted-resources", - Tektonclient: tektonclient, - } - - pipeline, source, err := lc.GetPipeline(ctx, tc.ref.Name) - if err != nil { - t.Fatalf("Received unexpected error ( %#v )", err) - } - if d := cmp.Diff(pipeline, tc.expected); d != "" { - t.Error(diff.PrintWantGot(d)) - } - if source != nil { - t.Errorf("expected source is nil, but got %v", source) - } - }) - } -} + resolvedSigned := test.NewResolvedResource(signedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) + requesterSigned := test.NewRequester(resolvedSigned, nil) -func TestLocalPipelineRef_TrustedResourceVerification_Error(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) + tamperedPipeline := signedPipeline.DeepCopy() + 
tamperedPipeline.Annotations["random"] = "attack" + tamperedPipelineBytes, err := json.Marshal(tamperedPipeline) if err != nil { - t.Fatal(err) + t.Fatal("fail to marshal pipeline", err) } + resolvedTampered := test.NewResolvedResource(tamperedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) + requesterTampered := test.NewRequester(resolvedTampered, nil) - unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") - signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "test-signed") - if err != nil { - t.Fatal("fail to sign pipeline", err) + pipelineRef := &v1beta1.PipelineRef{ + Name: signedPipeline.Name, + ResolverRef: v1beta1.ResolverRef{ + Resolver: "git", + }, } - // attack another signed pipeline - signedPipeline2, err := test.GetSignedPipeline(test.GetUnsignedPipeline("test-pipeline2"), signer, "test-signed2") - if err != nil { - t.Fatal("fail to sign task", err) - } - tamperedPipeline := signedPipeline2.DeepCopy() - if tamperedPipeline.Annotations == nil { - tamperedPipeline.Annotations = make(map[string]string) + pr := v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: pipelineRef, + ServiceAccountName: "default", + }, } - tamperedPipeline.Annotations["random"] = "attack" - tektonclient := fake.NewSimpleClientset(signedPipeline, unsignedPipeline, tamperedPipeline) + prWithStatus := v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: pipelineRef, + ServiceAccountName: "default", + }, + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineSpec: &signedPipeline.Spec, + Provenance: &v1beta1.Provenance{ + ConfigSource: &v1beta1.ConfigSource{ + URI: "abc.com", + Digest: map[string]string{"sha1": "a123"}, + EntryPoint: "foo/bar", + }, + }, + }, + }, + } testcases := []struct { name string - ref *v1beta1.PipelineRef + requester *test.Requester resourceVerificationMode string + pipelinerun v1beta1.PipelineRun expected runtime.Object - expectedErr error - }{ - { - name: "local unsigned pipeline with enforce policy", - ref: &v1beta1.PipelineRef{ - Name: "test-pipeline", - }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, - }, - { - name: "local tampered pipeline with enforce policy", - ref: &v1beta1.PipelineRef{ - Name: "test-signed2", + }{{ + name: "signed pipeline with enforce policy", + requester: requesterSigned, + resourceVerificationMode: config.EnforceResourceVerificationMode, + pipelinerun: pr, + expected: signedPipeline, + }, { + name: "unsigned pipeline with warn policy", + requester: requesterUnsigned, + resourceVerificationMode: config.WarnResourceVerificationMode, + pipelinerun: pr, + expected: unsignedPipeline, + }, { + name: "signed pipeline with warn policy", + requester: requesterSigned, + resourceVerificationMode: config.WarnResourceVerificationMode, + pipelinerun: pr, + expected: signedPipeline, + }, { + name: "tampered pipeline with warn policy", + requester: requesterTampered, + resourceVerificationMode: config.WarnResourceVerificationMode, + pipelinerun: pr, + expected: tamperedPipeline, + }, { + name: "unsigned pipeline with skip policy", + requester: requesterUnsigned, + resourceVerificationMode: config.SkipResourceVerificationMode, + pipelinerun: pr, + expected: unsignedPipeline, + }, { + name: "signed pipeline with skip 
policy", + requester: requesterSigned, + resourceVerificationMode: config.SkipResourceVerificationMode, + pipelinerun: pr, + expected: signedPipeline, + }, { + name: "tampered pipeline with skip policy", + requester: requesterTampered, + resourceVerificationMode: config.SkipResourceVerificationMode, + pipelinerun: pr, + expected: tamperedPipeline, + }, { + name: "signed pipeline in status no need to verify", + requester: requesterSigned, + resourceVerificationMode: config.EnforceResourceVerificationMode, + pipelinerun: prWithStatus, + expected: &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{ + Name: signedPipeline.Name, + Namespace: signedPipeline.Namespace, }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, + Spec: signedPipeline.Spec, }, + }, } - for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) - lc := &resources.LocalPipelineRefResolver{ - Namespace: "trusted-resources", - Tektonclient: tektonclient, - } + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) + fn := resources.GetVerifiedPipelineFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.pipelinerun, vps) - pipeline, source, err := lc.GetPipeline(ctx, tc.ref.Name) - if err == nil || !errors.Is(err, tc.expectedErr) { - t.Fatalf("Expected error %v but found %v instead", tc.expectedErr, err) + resolvedPipeline, source, err := fn(ctx, pipelineRef.Name) + if err != nil { + t.Fatalf("Received unexpected error ( %#v )", err) } - if d := cmp.Diff(pipeline, tc.expected); d != "" { - t.Error(diff.PrintWantGot(d)) + if d := cmp.Diff(tc.expected, resolvedPipeline); d != "" { + t.Errorf("resolvedPipeline did not match: %s", diff.PrintWantGot(d)) } - - if source != nil { - t.Errorf("expected source is nil, but got %v", source) + if d := cmp.Diff(sampleConfigSource, source); d != "" { + t.Errorf("configSources did not match: %s", diff.PrintWantGot(d)) } }) } } -func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Success(t *testing.T) { +func TestGetVerifiedPipelineFunc_VerifyError(t *testing.T) { ctx := context.Background() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + tektonclient := fake.NewSimpleClientset() + signer, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") unsignedPipelineBytes, err := json.Marshal(unsignedPipeline) @@ -648,13 +611,6 @@ func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Success(t if err != nil { t.Fatal("fail to sign pipeline", err) } - signedPipelineBytes, err := json.Marshal(signedPipeline) - if err != nil { - t.Fatal("fail to marshal pipeline", err) - } - - resolvedSigned := test.NewResolvedResource(signedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) - requesterSigned := test.NewRequester(resolvedSigned, nil) tamperedPipeline := signedPipeline.DeepCopy() tamperedPipeline.Annotations["random"] = "attack" @@ -672,47 +628,25 @@ func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Success(t requester *test.Requester resourceVerificationMode string expected runtime.Object + expectedErr error }{ { - name: "signed pipeline with enforce policy", - requester: requesterSigned, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: signedPipeline, - }, { - name: 
"unsigned pipeline with warn policy", - requester: requesterUnsigned, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: unsignedPipeline, - }, { - name: "signed pipeline with warn policy", - requester: requesterSigned, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: signedPipeline, - }, { - name: "tampered pipeline with warn policy", - requester: requesterTampered, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: tamperedPipeline, - }, { - name: "unsigned pipeline with skip policy", + name: "unsigned pipeline with enforce policy", requester: requesterUnsigned, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: unsignedPipeline, - }, { - name: "signed pipeline with skip policy", - requester: requesterSigned, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: signedPipeline, + resourceVerificationMode: config.EnforceResourceVerificationMode, + expected: nil, + expectedErr: trustedresources.ErrorResourceVerificationFailed, }, { - name: "tampered pipeline with skip policy", + name: "tampered pipeline with enforce policy", requester: requesterTampered, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: tamperedPipeline, + resourceVerificationMode: config.EnforceResourceVerificationMode, + expected: nil, + expectedErr: trustedresources.ErrorResourceVerificationFailed, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) pr := &v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, Spec: v1beta1.PipelineRunSpec{ @@ -720,28 +654,26 @@ func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Success(t ServiceAccountName: "default", }, } - fn := resources.GetPipelineFunc(ctx, nil, nil, tc.requester, pr) + fn := resources.GetVerifiedPipelineFunc(ctx, k8sclient, tektonclient, tc.requester, pr, vps) resolvedPipeline, source, err := fn(ctx, pipelineRef.Name) - if err != nil { - t.Fatalf("Received unexpected error ( %#v )", err) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("GetVerifiedPipelineFunc got %v, want %v", err, tc.expectedErr) } - if d := cmp.Diff(tc.expected, resolvedPipeline); d != "" { - t.Error(d) + if d := cmp.Diff(resolvedPipeline, tc.expected); d != "" { + t.Errorf("resolvedPipeline did not match: %s", diff.PrintWantGot(d)) } - if d := cmp.Diff(sampleConfigSource, source); d != "" { - t.Errorf("configSources did not match: %s", diff.PrintWantGot(d)) + if source != nil { + t.Errorf("got %v, but expected source is nil", source) } }) } } -func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Error(t *testing.T) { +func TestGetVerifiedPipelineFunc_GetFuncError(t *testing.T) { ctx := context.Background() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + tektonclient := fake.NewSimpleClientset() + _, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") unsignedPipelineBytes, err := json.Marshal(unsignedPipeline) @@ -751,65 +683,75 @@ func TestGetPipelineFunc_RemoteResolution_TrustedResourceVerification_Error(t *t resolvedUnsigned := test.NewResolvedResource(unsignedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) requesterUnsigned := 
test.NewRequester(resolvedUnsigned, nil) + resolvedUnsigned.DataErr = fmt.Errorf("resolution error") - signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "signed") - if err != nil { - t.Fatal("fail to sign pipeline", err) + prBundleError := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "pipelineName", + Bundle: "bundle", + }, + ServiceAccountName: "default", + }, } - tamperedPipeline := signedPipeline.DeepCopy() - tamperedPipeline.Annotations["random"] = "attack" - tamperedPipelineBytes, err := json.Marshal(tamperedPipeline) - if err != nil { - t.Fatal("fail to marshal pipeline", err) + prResolutionError := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{ + Name: "pipelineName", + ResolverRef: v1beta1.ResolverRef{ + Resolver: "git", + }, + }, + ServiceAccountName: "default", + }, } - resolvedTampered := test.NewResolvedResource(tamperedPipelineBytes, nil, sampleConfigSource.DeepCopy(), nil) - requesterTampered := test.NewRequester(resolvedTampered, nil) - - pipelineRef := &v1beta1.PipelineRef{ResolverRef: v1beta1.ResolverRef{Resolver: "git"}} testcases := []struct { name string requester *test.Requester resourceVerificationMode string - expected runtime.Object + pipelinerun v1beta1.PipelineRun expectedErr error }{ { - name: "unsigned pipeline with enforce policy", + name: "get error when oci bundle return error", requester: requesterUnsigned, resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, - }, { - name: "tampered pipeline with enforce policy", - requester: requesterTampered, + pipelinerun: *prBundleError, + expectedErr: fmt.Errorf(`failed to get pipeline: failed to get keychain: serviceaccounts "default" not found`), + }, + { + name: "get error when remote resolution return error", + requester: requesterUnsigned, resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, + pipelinerun: *prResolutionError, + expectedErr: fmt.Errorf("failed to get pipeline: error accessing data from remote resource: %v", resolvedUnsigned.DataErr), }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) - pr := &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: pipelineRef, - ServiceAccountName: "default", + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) + store := config.NewStore(logging.FromContext(ctx).Named("config-store")) + featureflags := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "tekton-pipelines", + Name: "feature-flags", + }, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", }, } - fn := resources.GetPipelineFunc(ctx, nil, nil, tc.requester, pr) + store.OnConfigChanged(featureflags) + ctx = store.ToContext(ctx) - resolvedPipeline, source, err := fn(ctx, pipelineRef.Name) - if err == nil || !errors.Is(err, tc.expectedErr) { - t.Fatalf("Expected error %v but found %v instead", tc.expectedErr, err) - } - if d := cmp.Diff(tc.expected, resolvedPipeline); d != "" { - t.Error(d) - } - if source != nil { - t.Errorf("expected source 
is nil, but got %v", source) + fn := resources.GetVerifiedPipelineFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.pipelinerun, vps) + + _, _, err = fn(ctx, tc.pipelinerun.Spec.PipelineRef.Name) + if d := cmp.Diff(err.Error(), tc.expectedErr.Error()); d != "" { + t.Errorf("GetVerifiedPipelineFunc got %v, want %v", err, tc.expectedErr) } }) } diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index 9e2b07a9044..a9c7242662e 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -23,6 +23,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client" + verificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy" taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" resolutionclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client" @@ -53,6 +54,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex podInformer := filteredpodinformer.Get(ctx, v1beta1.ManagedByLabelKey) resourceInformer := resourceinformer.Get(ctx) limitrangeInformer := limitrangeinformer.Get(ctx) + verificationpolicyInformer := verificationpolicyinformer.Get(ctx) resolutionInformer := resolutioninformer.Get(ctx) configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger)) configStore.WatchConfigs(cmw) @@ -63,19 +65,20 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex } c := &Reconciler{ - KubeClientSet: kubeclientset, - PipelineClientSet: pipelineclientset, - Images: opts.Images, - Clock: clock, - taskRunLister: taskRunInformer.Lister(), - resourceLister: resourceInformer.Lister(), - limitrangeLister: limitrangeInformer.Lister(), - cloudEventClient: cloudeventclient.Get(ctx), - metrics: taskrunmetrics.Get(ctx), - entrypointCache: entrypointCache, - podLister: podInformer.Lister(), - pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), - resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()), + KubeClientSet: kubeclientset, + PipelineClientSet: pipelineclientset, + Images: opts.Images, + Clock: clock, + taskRunLister: taskRunInformer.Lister(), + resourceLister: resourceInformer.Lister(), + limitrangeLister: limitrangeInformer.Lister(), + verificationPolicyLister: verificationpolicyInformer.Lister(), + cloudEventClient: cloudeventclient.Get(ctx), + metrics: taskrunmetrics.Get(ctx), + entrypointCache: entrypointCache, + podLister: podInformer.Lister(), + pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), + resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()), } impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { return controller.Options{ diff --git a/pkg/reconciler/taskrun/resources/taskref.go b/pkg/reconciler/taskrun/resources/taskref.go index 2d07c06075e..4e161a57920 100644 --- a/pkg/reconciler/taskrun/resources/taskref.go +++ b/pkg/reconciler/taskrun/resources/taskref.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-containerregistry/pkg/authn/k8schain" "github.com/tektoncd/pipeline/pkg/apis/config" + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "github.com/tektoncd/pipeline/pkg/remote" @@ -57,7 +58,7 @@ func GetTaskKind(taskrun *v1beta1.TaskRun) v1beta1.TaskKind { // also requires a kubeclient, tektonclient, namespace, and service account in case it needs to find that task in // cluster or authorize against an external repositroy. It will figure out whether it needs to look in the cluster or in // a remote image to fetch the reference. It will also return the "kind" of the task being referenced. -func GetTaskFuncFromTaskRun(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, taskrun *v1beta1.TaskRun) GetTask { +func GetTaskFuncFromTaskRun(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, taskrun *v1beta1.TaskRun, verificationpolicies []*v1alpha1.VerificationPolicy) GetTask { // if the spec is already in the status, do not try to fetch it again, just use it as source of truth. // Same for the Source field in the Status.Provenance. if taskrun.Status.TaskSpec != nil { @@ -75,7 +76,37 @@ func GetTaskFuncFromTaskRun(ctx context.Context, k8s kubernetes.Interface, tekto }, configsource, nil } } - return GetTaskFunc(ctx, k8s, tekton, requester, taskrun, taskrun.Spec.TaskRef, taskrun.Name, taskrun.Namespace, taskrun.Spec.ServiceAccountName) + return GetVerifiedTaskFunc(ctx, k8s, tekton, requester, taskrun, taskrun.Spec.TaskRef, taskrun.Name, taskrun.Namespace, taskrun.Spec.ServiceAccountName, verificationpolicies) +} + +// GetVerifiedTaskFunc is a wrapper of GetTaskFunc and return the function to +// verify the task if resource-verification-mode is not "skip" +func GetVerifiedTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset.Interface, requester remoteresource.Requester, + owner kmeta.OwnerRefable, taskref *v1beta1.TaskRef, trName string, namespace, saName string, verificationpolicies []*v1alpha1.VerificationPolicy) GetTask { + get := GetTaskFunc(ctx, k8s, tekton, requester, owner, taskref, trName, namespace, saName) + + return func(context.Context, string) (v1beta1.TaskObject, *v1beta1.ConfigSource, error) { + t, s, err := get(ctx, taskref.Name) + if err != nil { + return nil, nil, fmt.Errorf("failed to get task: %w", err) + } + var source string + if s != nil { + source = s.URI + } + logger := logging.FromContext(ctx) + if config.CheckEnforceResourceVerificationMode(ctx) || config.CheckWarnResourceVerificationMode(ctx) { + if err := trustedresources.VerifyTask(ctx, t, k8s, source, verificationpolicies); err != nil { + if config.CheckEnforceResourceVerificationMode(ctx) { + logger.Errorf("GetVerifiedTaskFunc failed: %v", err) + return nil, nil, fmt.Errorf("GetVerifiedTaskFunc failed: %w: %v", trustedresources.ErrorResourceVerificationFailed, err) + } + logger.Warnf("GetVerifiedTaskFunc failed: %v", err) + return t, s, nil + } + } + return t, s, nil + } } // GetTaskFunc is a factory function that will use the given TaskRef as context to return a valid GetTask function. 
It @@ -134,7 +165,6 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset Namespace: namespace, Kind: kind, Tektonclient: tekton, - K8sclient: k8s, } return local.GetTask } @@ -155,10 +185,6 @@ func resolveTask(ctx context.Context, resolver remote.Resolver, name string, kin if err != nil { return nil, nil, fmt.Errorf("failed to convert obj %s into Task", obj.GetObjectKind().GroupVersionKind().String()) } - // TODO(#5527): Consider move this function call to GetTaskData - if err := verifyResolvedTask(ctx, taskObj, k8s); err != nil { - return nil, nil, err - } return taskObj, configSource, nil } @@ -179,7 +205,6 @@ type LocalTaskRefResolver struct { Namespace string Kind v1beta1.TaskKind Tektonclient clientset.Interface - K8sclient kubernetes.Interface } // GetTask will resolve either a Task or ClusterTask from the local cluster using a versioned Tekton client. It will @@ -203,9 +228,6 @@ func (l *LocalTaskRefResolver) GetTask(ctx context.Context, name string) (v1beta if err != nil { return nil, nil, err } - if err := verifyResolvedTask(ctx, task, l.K8sclient); err != nil { - return nil, nil, err - } return task, nil, nil } @@ -213,19 +235,3 @@ func (l *LocalTaskRefResolver) GetTask(ctx context.Context, name string) (v1beta func IsGetTaskErrTransient(err error) bool { return strings.Contains(err.Error(), errEtcdLeaderChange) } - -// verifyResolvedTask verifies the resolved task -func verifyResolvedTask(ctx context.Context, task v1beta1.TaskObject, k8s kubernetes.Interface) error { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode || cfg.FeatureFlags.ResourceVerificationMode == config.WarnResourceVerificationMode { - if err := trustedresources.VerifyTask(ctx, task, k8s); err != nil { - if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode { - return trustedresources.ErrorResourceVerificationFailed - } - logger := logging.FromContext(ctx) - logger.Warnf("trusted resources verification failed: %v", err) - return nil - } - } - return nil -} diff --git a/pkg/reconciler/taskrun/resources/taskref_test.go b/pkg/reconciler/taskrun/resources/taskref_test.go index f573bce83aa..464985f8a89 100644 --- a/pkg/reconciler/taskrun/resources/taskref_test.go +++ b/pkg/reconciler/taskrun/resources/taskref_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http/httptest" "net/url" "strings" @@ -28,6 +29,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-containerregistry/pkg/registry" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" @@ -40,6 +42,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" fakek8s "k8s.io/client-go/kubernetes/fake" + "knative.dev/pkg/logging" logtesting "knative.dev/pkg/logging/testing" ) @@ -512,7 +515,8 @@ echo hello Spec: TaskSpec, } - fn := resources.GetTaskFuncFromTaskRun(ctx, kubeclient, tektonclient, nil, TaskRun) + fn := resources.GetTaskFuncFromTaskRun(ctx, kubeclient, tektonclient, nil, TaskRun, []*v1alpha1.VerificationPolicy{}) + actualTask, actualConfigSource, err := fn(ctx, name) if err != nil { t.Fatalf("failed to call Taskfn: %s", err.Error()) @@ -560,7 +564,7 @@ func TestGetTaskFunc_RemoteResolution(t 
*testing.T) { } if d := cmp.Diff(task, resolvedTask); d != "" { - t.Error(d) + t.Errorf("resolvedTask did not match: %s", diff.PrintWantGot(d)) } } @@ -620,7 +624,7 @@ func TestGetTaskFunc_RemoteResolution_ReplacedParams(t *testing.T) { } if d := cmp.Diff(task, resolvedTask); d != "" { - t.Error(d) + t.Errorf("resolvedTask did not match: %s", diff.PrintWantGot(d)) } if d := cmp.Diff(sampleConfigSource, resolvedConfigSource); d != "" { @@ -686,196 +690,11 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) { } } -func TestLocalTaskRef_TrustedResourceVerification_Success(t *testing.T) { +func TestGetVerifiedTaskFunc_Success(t *testing.T) { ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } - - unsignedTask := test.GetUnsignedTask("test-task") - signedTask, err := test.GetSignedTask(unsignedTask, signer, "test-signed") - if err != nil { - t.Fatal("fail to sign task", err) - } - // attack another signed task - signedTask2, err := test.GetSignedTask(test.GetUnsignedTask("test-task2"), signer, "test-signed2") - if err != nil { - t.Fatal("fail to sign task", err) - } - - tamperedTask := signedTask2.DeepCopy() - if tamperedTask.Annotations == nil { - tamperedTask.Annotations = make(map[string]string) - } - tamperedTask.Annotations["random"] = "attack" - - tektonclient := fake.NewSimpleClientset(signedTask, unsignedTask, tamperedTask) - testcases := []struct { - name string - ref *v1beta1.TaskRef - resourceVerificationMode string - expected runtime.Object - }{ - { - name: "local signed task with enforce policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: signedTask, - }, { - name: "local unsigned task with warn policy", - ref: &v1beta1.TaskRef{ - Name: "test-task", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: unsignedTask, - }, { - name: "local signed task with warn policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: signedTask, - }, { - name: "local tampered task with warn policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed2", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: tamperedTask, - }, { - name: "local unsigned task with skip policy", - ref: &v1beta1.TaskRef{ - Name: "test-task", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: unsignedTask, - }, { - name: "local signed task with skip policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed", - }, - resourceVerificationMode: config.SkipResourceVerificationMode, - expected: signedTask, - }, { - name: "local tampered task with skip policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed2", - }, - resourceVerificationMode: config.WarnResourceVerificationMode, - expected: tamperedTask, - }, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) - lc := &resources.LocalTaskRefResolver{ - Namespace: "trusted-resources", - Kind: tc.ref.Kind, - Tektonclient: tektonclient, - } - - task, source, err := lc.GetTask(ctx, tc.ref.Name) - if err != nil { - t.Fatalf("Received unexpected error %#v", err) - } - - if d := cmp.Diff(task, tc.expected); d != "" { - t.Error(diff.PrintWantGot(d)) - } - - if source != nil { - 
t.Errorf("expected source for local task is nil, but got %v", source) - } - }) - } -} - -func TestLocalTaskRef_TrustedResourceVerification_Error(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } - - unsignedTask := test.GetUnsignedTask("test-task") - signedTask, err := test.GetSignedTask(unsignedTask, signer, "test-signed") - if err != nil { - t.Fatal("fail to sign task", err) - } - // attack another signed task - signedTask2, err := test.GetSignedTask(test.GetUnsignedTask("test-task2"), signer, "test-signed2") - if err != nil { - t.Fatal("fail to sign task", err) - } - - tamperedTask := signedTask2.DeepCopy() - if tamperedTask.Annotations == nil { - tamperedTask.Annotations = make(map[string]string) - } - tamperedTask.Annotations["random"] = "attack" - - tektonclient := fake.NewSimpleClientset(signedTask, unsignedTask, tamperedTask) - testcases := []struct { - name string - ref *v1beta1.TaskRef - resourceVerificationMode string - expected runtime.Object - expectedErr error - }{ - { - name: "local unsigned task with enforce policy", - ref: &v1beta1.TaskRef{ - Name: "test-task", - }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, - }, { - name: "local tampered task with enforce policy", - ref: &v1beta1.TaskRef{ - Name: "test-signed2", - }, - resourceVerificationMode: config.EnforceResourceVerificationMode, - expected: nil, - expectedErr: trustedresources.ErrorResourceVerificationFailed, - }, - } - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) - lc := &resources.LocalTaskRefResolver{ - Namespace: "trusted-resources", - Kind: tc.ref.Kind, - Tektonclient: tektonclient, - } - task, source, err := lc.GetTask(ctx, tc.ref.Name) - if err == nil || !errors.Is(err, tc.expectedErr) { - t.Fatalf("Expected error %v but found %v instead", tc.expectedErr, err) - } - if d := cmp.Diff(task, tc.expected); d != "" { - t.Error(diff.PrintWantGot(d)) - } - if source != nil { - t.Errorf("expected source for local task is nil, but got %v", source) - } - }) - } -} - -func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Success(t *testing.T) { - ctx := context.Background() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") + tektonclient := fake.NewSimpleClientset() unsignedTask := test.GetUnsignedTask("test-task") unsignedTaskBytes, err := json.Marshal(unsignedTask) @@ -953,7 +772,7 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Success(t *tes } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) tr := &v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, Spec: v1beta1.TaskRunSpec{ @@ -961,7 +780,7 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Success(t *tes ServiceAccountName: "default", }, } - fn := resources.GetTaskFunc(ctx, nil, nil, tc.requester, tr, tr.Spec.TaskRef, "", "default", "default") + fn := resources.GetVerifiedTaskFunc(ctx, k8sclient, tektonclient, 
tc.requester, tr, tr.Spec.TaskRef, "", "default", "default", vps) resolvedTask, source, err := fn(ctx, taskRef.Name) @@ -970,7 +789,7 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Success(t *tes } if d := cmp.Diff(tc.expected, resolvedTask); d != "" { - t.Error(d) + t.Errorf("resolvedTask did not match: %s", diff.PrintWantGot(d)) } if d := cmp.Diff(sampleConfigSource, source); d != "" { @@ -980,12 +799,10 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Success(t *tes } } -func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Error(t *testing.T) { +func TestGetVerifiedTaskFunc_VerifyError(t *testing.T) { ctx := context.Background() - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") + tektonclient := fake.NewSimpleClientset() unsignedTask := test.GetUnsignedTask("test-task") unsignedTaskBytes, err := json.Marshal(unsignedTask) @@ -1034,7 +851,7 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Error(t *testi } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - ctx = test.SetupTrustedResourceConfig(ctx, secretpath, tc.resourceVerificationMode) + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) tr := &v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, Spec: v1beta1.TaskRunSpec{ @@ -1042,20 +859,108 @@ func TestGetTaskFunc_RemoteResolution_TrustedResourceVerification_Error(t *testi ServiceAccountName: "default", }, } - fn := resources.GetTaskFunc(ctx, nil, nil, tc.requester, tr, tr.Spec.TaskRef, "", "default", "default") + fn := resources.GetVerifiedTaskFunc(ctx, k8sclient, tektonclient, tc.requester, tr, tr.Spec.TaskRef, "", "default", "default", vps) resolvedTask, source, err := fn(ctx, taskRef.Name) - if err == nil || !errors.Is(err, tc.expectedErr) { - t.Fatalf("Expected error %v but found %v instead", tc.expectedErr, err) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("GetVerifiedTaskFunc got %v but want %v", err, tc.expectedErr) } if d := cmp.Diff(tc.expected, resolvedTask); d != "" { - t.Error(d) + t.Errorf("resolvedTask did not match: %s", diff.PrintWantGot(d)) } if source != nil { - t.Errorf("expected source is nil, but got: %v", source) + t.Errorf("source is: %v but want is nil", source) + } + }) + } +} + +func TestGetVerifiedTaskFunc_GetFuncError(t *testing.T) { + ctx := context.Background() + _, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources") + tektonclient := fake.NewSimpleClientset() + + unsignedTask := test.GetUnsignedTask("test-task") + unsignedTaskBytes, err := json.Marshal(unsignedTask) + if err != nil { + t.Fatal("fail to marshal task", err) + } + + resolvedUnsigned := test.NewResolvedResource(unsignedTaskBytes, nil, sampleConfigSource.DeepCopy(), nil) + requesterUnsigned := test.NewRequester(resolvedUnsigned, nil) + resolvedUnsigned.DataErr = fmt.Errorf("resolution error") + + trBundleError := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskName", + Bundle: "bundle", + }, + ServiceAccountName: "default", + }, + } + + trResolutionError := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskName", + ResolverRef: v1beta1.ResolverRef{ + Resolver: "git", + }, + }, 
+ ServiceAccountName: "default", + }, + } + + testcases := []struct { + name string + requester *test.Requester + resourceVerificationMode string + taskrun v1beta1.TaskRun + expectedErr error + }{ + { + name: "get error when oci bundle return error", + requester: requesterUnsigned, + resourceVerificationMode: config.EnforceResourceVerificationMode, + taskrun: *trBundleError, + expectedErr: fmt.Errorf(`failed to get task: failed to get keychain: serviceaccounts "default" not found`), + }, + { + name: "get error when remote resolution return error", + requester: requesterUnsigned, + resourceVerificationMode: config.EnforceResourceVerificationMode, + taskrun: *trResolutionError, + expectedErr: fmt.Errorf("failed to get task: error accessing data from remote resource: %v", resolvedUnsigned.DataErr), + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + ctx = test.SetupTrustedResourceConfig(ctx, tc.resourceVerificationMode) + store := config.NewStore(logging.FromContext(ctx).Named("config-store")) + featureflags := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "tekton-pipelines", + Name: "feature-flags", + }, + Data: map[string]string{ + "enable-tekton-oci-bundles": "true", + }, + } + store.OnConfigChanged(featureflags) + ctx = store.ToContext(ctx) + + fn := resources.GetVerifiedTaskFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.taskrun, tc.taskrun.Spec.TaskRef, "", "default", "default", vps) + + _, _, err = fn(ctx, tc.taskrun.Spec.TaskRef.Name) + + if d := cmp.Diff(err.Error(), tc.expectedErr.Error()); d != "" { + t.Fatalf("Expected error %v but found %v instead", tc.expectedErr, err) } }) } diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index b5a7f6e8646..94f80eca6be 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -28,11 +28,13 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/resource" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" + alphalisters "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" resourcelisters "github.com/tektoncd/pipeline/pkg/client/resource/listers/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/internal/affinityassistant" @@ -76,15 +78,16 @@ type Reconciler struct { Clock clock.PassiveClock // listers index properties about resources - taskRunLister listers.TaskRunLister - resourceLister resourcelisters.PipelineResourceLister - limitrangeLister corev1Listers.LimitRangeLister - podLister corev1Listers.PodLister - cloudEventClient cloudevent.CEClient - entrypointCache podconvert.EntrypointCache - metrics *taskrunmetrics.Recorder - pvcHandler volumeclaim.PvcHandler - resolutionRequester resolution.Requester + taskRunLister listers.TaskRunLister + resourceLister resourcelisters.PipelineResourceLister + limitrangeLister corev1Listers.LimitRangeLister + podLister corev1Listers.PodLister + verificationPolicyLister alphalisters.VerificationPolicyLister + cloudEventClient cloudevent.CEClient 
+ entrypointCache podconvert.EntrypointCache + metrics *taskrunmetrics.Recorder + pvcHandler volumeclaim.PvcHandler + resolutionRequester resolution.Requester } // Check that our Reconciler implements taskrunreconciler.Interface @@ -322,7 +325,16 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1beta1.TaskRun) (*v1beta1 logger := logging.FromContext(ctx) tr.SetDefaults(ctx) - getTaskfunc := resources.GetTaskFuncFromTaskRun(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, tr) + cfg := config.FromContextOrDefaults(ctx) + var verificationpolicies []*v1alpha1.VerificationPolicy + if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode || cfg.FeatureFlags.ResourceVerificationMode == config.WarnResourceVerificationMode { + var err error + verificationpolicies, err = c.verificationPolicyLister.VerificationPolicies(tr.Namespace).List(labels.Everything()) + if err != nil { + return nil, nil, fmt.Errorf("failed to list VerificationPolicies from namespace %s with error %v", tr.Namespace, err) + } + } + getTaskfunc := resources.GetTaskFuncFromTaskRun(ctx, c.KubeClientSet, c.PipelineClientSet, c.resolutionRequester, tr, verificationpolicies) taskMeta, taskSpec, err := resources.GetTaskData(ctx, tr, getTaskfunc) switch { diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 53b9b4bb3e5..ee1bb244b92 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -46,6 +46,7 @@ import ( ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" + "github.com/tektoncd/pipeline/pkg/trustedresources" "github.com/tektoncd/pipeline/pkg/workspace" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" @@ -5081,10 +5082,7 @@ status: podName: the-pod `) - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, _, vps := test.SetupMatchAllVerificationPolicies(t, tr.Namespace) signedTask, err := test.GetSignedTask(ts, signer, "test-task") if err != nil { t.Fatal("fail to sign task", err) @@ -5097,18 +5095,13 @@ status: "resource-verification-mode": "enforce", }, }, - { - ObjectMeta: metav1.ObjectMeta{Name: config.GetTrustedResourcesConfigName(), Namespace: system.Namespace()}, - Data: map[string]string{ - config.PublicKeys: secretpath, - }, - }, } d := test.Data{ - TaskRuns: []*v1beta1.TaskRun{tr}, - Tasks: []*v1beta1.Task{signedTask}, - ConfigMaps: cms, + TaskRuns: []*v1beta1.TaskRun{tr}, + Tasks: []*v1beta1.Task{signedTask}, + ConfigMaps: cms, + VerificationPolicies: vps, } testAssets, cancel := getTaskRunController(t, d) @@ -5156,10 +5149,7 @@ status: podName: the-pod `) - signer, secretpath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, _, vps := test.SetupMatchAllVerificationPolicies(t, tr.Namespace) signedTask, err := test.GetSignedTask(ts, signer, "test-task") if err != nil { t.Fatal("fail to sign task", err) @@ -5178,12 +5168,6 @@ status: "resource-verification-mode": "enforce", }, }, - { - ObjectMeta: metav1.ObjectMeta{Name: config.GetTrustedResourcesConfigName(), Namespace: system.Namespace()}, - Data: map[string]string{ - config.PublicKeys: secretpath, - }, - }, } testCases := []struct { @@ -5195,21 +5179,22 @@ status: { name: "unsigned task fails verification", task: []*v1beta1.Task{ts}, - expectedError: fmt.Errorf("1 error 
occurred:\n\t* error when listing tasks for taskRun test-taskrun: resource verification failed"), + expectedError: trustedresources.ErrorResourceVerificationFailed, }, { name: "modified task fails verification", task: []*v1beta1.Task{tamperedTask}, - expectedError: fmt.Errorf("1 error occurred:\n\t* error when listing tasks for taskRun test-taskrun: resource verification failed"), + expectedError: trustedresources.ErrorResourceVerificationFailed, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { d := test.Data{ - TaskRuns: []*v1beta1.TaskRun{tr}, - Tasks: tc.task, - ConfigMaps: cms, + TaskRuns: []*v1beta1.TaskRun{tr}, + Tasks: tc.task, + ConfigMaps: cms, + VerificationPolicies: vps, } testAssets, cancel := getTaskRunController(t, d) @@ -5217,8 +5202,8 @@ status: createServiceAccount(t, testAssets, tr.Spec.ServiceAccountName, tr.Namespace) err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr)) - if d := cmp.Diff(strings.TrimSuffix(err.Error(), "\n\n"), tc.expectedError.Error()); d != "" { - t.Errorf("Expected: %v, but Got: %v", tc.expectedError.Error(), err.Error()) + if !errors.Is(err, tc.expectedError) { + t.Errorf("Reconcile got %v but want %v", err, tc.expectedError) } tr, err := testAssets.Clients.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Get(testAssets.Ctx, tr.Name, metav1.GetOptions{}) if err != nil { diff --git a/pkg/trustedresources/errors.go b/pkg/trustedresources/errors.go index e140e7eca33..8f0063ddc7e 100644 --- a/pkg/trustedresources/errors.go +++ b/pkg/trustedresources/errors.go @@ -17,5 +17,15 @@ package trustedresources import "errors" -// ErrorResourceVerificationFailed is returned when trusted resources fails verification. -var ErrorResourceVerificationFailed = errors.New("resource verification failed") +var ( + // ErrorResourceVerificationFailed is returned when a trusted resource fails verification. + ErrorResourceVerificationFailed = errors.New("resource verification failed") + // ErrorEmptyVerificationConfig is returned when no matching VerificationPolicies and no config-trusted-resources configmap are found + ErrorEmptyVerificationConfig = errors.New("no policies or config-trusted-resources configmap found for verification") + // ErrorNoMatchedPolicies is returned when no policies are matched + ErrorNoMatchedPolicies = errors.New("no policies are matched") + // ErrorRegexMatch is returned when a regex match returns an error + ErrorRegexMatch = errors.New("regex failed to match") + // ErrorSignatureMissing is returned when the signature is missing from the resource + ErrorSignatureMissing = errors.New("signature is missing") +) diff --git a/pkg/trustedresources/verifier/errors.go b/pkg/trustedresources/verifier/errors.go new file mode 100644 index 00000000000..b064c51d633 --- /dev/null +++ b/pkg/trustedresources/verifier/errors.go @@ -0,0 +1,41 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package verifier + +import "errors" + +var ( + // ErrorFailedLoadKeyFile is returned when the key file cannot be read + ErrorFailedLoadKeyFile = errors.New("the key file cannot be read") + // ErrorDecodeKey is returned when the key cannot be decoded + ErrorDecodeKey = errors.New("key cannot be decoded") + // ErrorEmptyPublicKeys is returned when no public keys are found + ErrorEmptyPublicKeys = errors.New("no public keys are found") + // ErrorEmptySecretData is returned when the secret data is empty + ErrorEmptySecretData = errors.New("secret data is empty") + // ErrorSecretNotFound is returned when the secret is not found + ErrorSecretNotFound = errors.New("secret not found") + // ErrorMultipleSecretData is returned when the secret contains multiple data entries + ErrorMultipleSecretData = errors.New("secret contains multiple data") + // ErrorEmptyKey is returned when the key doesn't contain data or keyRef + ErrorEmptyKey = errors.New("key doesn't contain data or keyRef") + // ErrorK8sSpecificationInvalid is returned when the kubernetes specification format is invalid + ErrorK8sSpecificationInvalid = errors.New("kubernetes specification should be in the format k8s://<namespace>/<name>") + // ErrorLoadVerifier is returned when the verifier cannot be loaded from the key + ErrorLoadVerifier = errors.New("verifier cannot be loaded") + // ErrorAlgorithmInvalid is returned when the hash algorithm is not supported + ErrorAlgorithmInvalid = errors.New("unknown digest algorithm") +) diff --git a/pkg/trustedresources/verifier/verifier.go b/pkg/trustedresources/verifier/verifier.go new file mode 100644 index 00000000000..7a6b849f2bc --- /dev/null +++ b/pkg/trustedresources/verifier/verifier.go @@ -0,0 +1,181 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package verifier + +import ( + "context" + "crypto" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + // keyReference is the prefix of a secret reference + keyReference = "k8s://" +) + +// FromConfigMap gets all verifiers from the configmap; k8s is provided to fetch secrets from the cluster +func FromConfigMap(ctx context.Context, k8s kubernetes.Interface) ([]signature.Verifier, error) { + cfg := config.FromContextOrDefaults(ctx) + verifiers := []signature.Verifier{} + for key := range cfg.TrustedResources.Keys { + if key == "" { + continue + } + v, err := fromKeyRef(ctx, key, crypto.SHA256, k8s) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from keyref: %w", err) + } + verifiers = append(verifiers, v) + } + if len(verifiers) == 0 { + return nil, ErrorEmptyPublicKeys + } + return verifiers, nil +} + +// FromPolicy gets all verifiers from the VerificationPolicy.
+// For each policy, loop the Authorities of the VerificationPolicy to fetch public key +// from either inline Data or from a SecretRef. +func FromPolicy(ctx context.Context, k8s kubernetes.Interface, policy *v1alpha1.VerificationPolicy) ([]signature.Verifier, error) { + verifiers := []signature.Verifier{} + for _, a := range policy.Spec.Authorities { + algorithm, err := matchHashAlgorithm(a.Key.HashAlgorithm) + if err != nil { + return nil, fmt.Errorf("authority %q contains an invalid hash algorithm: %w", a.Name, err) + } + if a.Key.Data == "" && a.Key.SecretRef == nil { + return nil, ErrorEmptyKey + } + if a.Key.Data != "" { + v, err := fromData([]byte(a.Key.Data), algorithm) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from data: %w", err) + } + verifiers = append(verifiers, v) + } else if a.Key.SecretRef != nil { + v, err := fromSecret(ctx, fmt.Sprintf("%s%s/%s", keyReference, a.Key.SecretRef.Namespace, a.Key.SecretRef.Name), algorithm, k8s) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from secret: %w", err) + } + verifiers = append(verifiers, v) + } + } + if len(verifiers) == 0 { + return verifiers, ErrorEmptyPublicKeys + } + return verifiers, nil + +} + +// fromKeyRef parses the given keyRef, loads the key and returns an appropriate +// verifier using the provided hash algorithm +func fromKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (signature.Verifier, error) { + var raw []byte + if strings.HasPrefix(keyRef, keyReference) { + v, err := fromSecret(ctx, keyRef, hashAlgorithm, k8s) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from secret: %w", err) + } + return v, nil + } + raw, err := os.ReadFile(filepath.Clean(keyRef)) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrorFailedLoadKeyFile, err) + } + v, err := fromData(raw, hashAlgorithm) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from data: %w", err) + } + return v, nil +} + +// fromSecret fetches the public key from SecretRef and returns the verifier +// hashAlgorithm is provided to determine the hash algorithm of the key +func fromSecret(ctx context.Context, secretRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (signature.Verifier, error) { + if strings.HasPrefix(secretRef, keyReference) { + s, err := getKeyPairSecret(ctx, secretRef, k8s) + if err != nil { + return nil, fmt.Errorf("failed to get secret: %w", err) + } + // only 1 public key should be in the secret + if len(s.Data) == 0 { + return nil, fmt.Errorf("secret %q contains no data %w", secretRef, ErrorEmptySecretData) + } + if len(s.Data) > 1 { + return nil, fmt.Errorf("secret %q contains multiple data entries, only one is supported. 
%w", secretRef, ErrorMultipleSecretData) + } + for _, raw := range s.Data { + v, err := fromData(raw, hashAlgorithm) + if err != nil { + return nil, fmt.Errorf("failed to get verifier from secret data: %w", err) + } + return v, nil + } + } + return nil, fmt.Errorf("%w: secretRef %v is invalid", ErrorK8sSpecificationInvalid, secretRef) +} + +// fromData fetches the public key from raw data and returns the verifier +func fromData(raw []byte, hashAlgorithm crypto.Hash) (signature.Verifier, error) { + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrorDecodeKey, err) + } + v, err := signature.LoadVerifier(pubKey, hashAlgorithm) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrorLoadVerifier, err) + } + return v, nil +} + +// getKeyPairSecret fetches the secret from a k8sRef +// TODO(#5884): use a secret lister to fetch secrets +func getKeyPairSecret(ctx context.Context, k8sRef string, k8s kubernetes.Interface) (*v1.Secret, error) { + split := strings.Split(strings.TrimPrefix(k8sRef, keyReference), "/") + if len(split) != 2 { + return nil, ErrorK8sSpecificationInvalid + } + namespace, name := split[0], split[1] + + s, err := k8s.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("%w: %v", ErrorSecretNotFound, err) + } + + return s, nil +} + +// matchHashAlgorithm returns a crypto.Hash code using an algorithm name as input parameter +func matchHashAlgorithm(algorithmName v1alpha1.HashAlgorithm) (crypto.Hash, error) { + normalizedAlgo := strings.ToLower(string(algorithmName)) + algo, exists := v1alpha1.SupportedSignatureAlgorithms[v1alpha1.HashAlgorithm(normalizedAlgo)] + if !exists { + return crypto.SHA256, fmt.Errorf("%w: %s", ErrorAlgorithmInvalid, algorithmName) + } + return algo, nil +} diff --git a/pkg/trustedresources/verifier/verifier_test.go b/pkg/trustedresources/verifier/verifier_test.go new file mode 100644 index 00000000000..18e27f294a6 --- /dev/null +++ b/pkg/trustedresources/verifier/verifier_test.go @@ -0,0 +1,463 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package verifier + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/sigstore/sigstore/pkg/signature" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/test" + "github.com/tektoncd/pipeline/test/diff" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" +) + +const ( + namespace = "trusted-resources" +) + +func TestFromConfigMap_Success(t *testing.T) { + ctx := context.Background() + keys, keypath := test.GetKeysFromFile(ctx, t) + ctx = test.SetupTrustedResourceKeyConfig(ctx, keypath, config.EnforceResourceVerificationMode) + v, err := FromConfigMap(ctx, fakek8s.NewSimpleClientset()) + checkVerifier(t, keys, v[0]) + if err != nil { + t.Errorf("couldn't construct expected verifier from config map: %v", err) + } +} + +func TestFromConfigMap_Error(t *testing.T) { + tcs := []struct { + name string + keyPath string + expectedError error + }{{ + name: "wrong key path", + keyPath: "wrongPath", + expectedError: ErrorFailedLoadKeyFile, + }, { + name: "empty key path", + keyPath: "", + expectedError: ErrorEmptyPublicKeys, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + ctx := test.SetupTrustedResourceKeyConfig(context.Background(), tc.keyPath, config.EnforceResourceVerificationMode) + _, err := FromConfigMap(ctx, fakek8s.NewSimpleClientset()) + if !errors.Is(err, tc.expectedError) { + t.Errorf("FromConfigMap got: %v, want: %v", err, tc.expectedError) + } + }) + } + +} + +func TestFromPolicy_Success(t *testing.T) { + ctx := context.Background() + _, key256, k8sclient, vps := test.SetupVerificationPolicies(t) + keyInDataVp, keyInSecretVp := vps[0], vps[1] + + _, key384, pub, err := test.GenerateKeys(elliptic.P384(), crypto.SHA256) + if err != nil { + t.Fatalf("failed to generate keys %v", err) + } + + sha384Vp := &v1alpha1.VerificationPolicy{ + TypeMeta: metav1.TypeMeta{ + Kind: "VerificationPolicy", + APIVersion: "v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "differentAlgo", + Namespace: namespace, + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{}, + Authorities: []v1alpha1.Authority{ + { + Name: "sha384Key", + Key: &v1alpha1.KeyRef{ + Data: string(pub), + HashAlgorithm: "sha384", + }, + }, + }, + }, + } + + tcs := []struct { + name string + policy *v1alpha1.VerificationPolicy + key *ecdsa.PrivateKey + }{{ + name: "key in data", + policy: keyInDataVp, + key: key256, + }, { + name: "key in secret", + policy: keyInSecretVp, + key: key256, + }, { + name: "key with sha384", + policy: sha384Vp, + key: key384, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + verifiers, err := FromPolicy(ctx, k8sclient, tc.policy) + for _, v := range verifiers { + checkVerifier(t, tc.key, v) + } + if err != nil { + t.Errorf("couldn't construct expected verifier from VerificationPolicy: %v", err) + } + }) + } +} + +func TestFromPolicy_Error(t *testing.T) { + tcs := []struct { + name string + policy *v1alpha1.VerificationPolicy + expectedError error + }{{ + name: "hash algorithm is invalid", + policy: &v1alpha1.VerificationPolicy{ + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{ + { + Key: &v1alpha1.KeyRef{ + Data: "inlinekey", + HashAlgorithm: "sha1", + }, + }, + }, + }, + }, + expectedError: ErrorAlgorithmInvalid, + }, { + name: "key is 
empty", + policy: &v1alpha1.VerificationPolicy{ + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{ + { + Key: &v1alpha1.KeyRef{}, + }, + }, + }, + }, + expectedError: ErrorEmptyKey, + }, { + name: "authority is empty", + policy: &v1alpha1.VerificationPolicy{ + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{}, + }, + }, + expectedError: ErrorEmptyPublicKeys, + }, { + name: "from data error", + policy: &v1alpha1.VerificationPolicy{ + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{ + { + Key: &v1alpha1.KeyRef{ + Data: "inlinekey", + HashAlgorithm: "sha256", + }, + }, + }, + }, + }, + expectedError: ErrorDecodeKey, + }, { + name: "from secret error", + policy: &v1alpha1.VerificationPolicy{ + Spec: v1alpha1.VerificationPolicySpec{ + Authorities: []v1alpha1.Authority{ + { + Key: &v1alpha1.KeyRef{ + SecretRef: &v1.SecretReference{ + Name: "wrongSecret", + Namespace: "wrongNamespace", + }, + }, + }, + }, + }, + }, + expectedError: ErrorSecretNotFound, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + _, err := FromPolicy(context.Background(), fakek8s.NewSimpleClientset(), tc.policy) + if !errors.Is(err, tc.expectedError) { + t.Errorf("FromPolicy got: %v, want: %v", err, tc.expectedError) + } + }) + } +} + +func TestFromKeyRef_Success(t *testing.T) { + ctx := context.Background() + fileKey, keypath := test.GetKeysFromFile(ctx, t) + + _, secretKey, pub, err := test.GenerateKeys(elliptic.P256(), crypto.SHA256) + if err != nil { + t.Fatalf("failed to generate keys: %v", err) + } + secretData := &v1.Secret{ + Data: map[string][]byte{ + "data": pub, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: namespace}} + + k8sclient := fakek8s.NewSimpleClientset(secretData) + + tcs := []struct { + name string + keyref string + key *ecdsa.PrivateKey + }{{ + name: "key in file", + keyref: keypath, + key: fileKey, + }, { + name: "key in secret", + keyref: fmt.Sprintf("k8s://%s/secret", namespace), + key: secretKey, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + verifier, err := fromKeyRef(ctx, tc.keyref, crypto.SHA256, k8sclient) + checkVerifier(t, tc.key, verifier) + if err != nil { + t.Errorf("couldn't construct expected verifier from keyref: %v", err) + } + }) + } +} + +func TestFromKeyRef_Error(t *testing.T) { + ctx := context.Background() + _, keypath := test.GetKeysFromFile(ctx, t) + tcs := []struct { + name string + keyref string + algorithm crypto.Hash + expectedError error + }{{ + name: "failed to read file", + keyref: "wrongPath", + algorithm: crypto.SHA256, + expectedError: ErrorFailedLoadKeyFile, + }, { + name: "failed to read from secret", + keyref: fmt.Sprintf("k8s://%s/not-exist-secret", namespace), + algorithm: crypto.SHA256, + expectedError: ErrorSecretNotFound, + }, { + name: "failed to read from data", + keyref: keypath, + algorithm: crypto.BLAKE2b_256, + expectedError: ErrorLoadVerifier, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + _, err := fromKeyRef(ctx, tc.keyref, tc.algorithm, fakek8s.NewSimpleClientset()) + if !errors.Is(err, tc.expectedError) { + t.Errorf("fromKeyRef got: %v, want: %v", err, tc.expectedError) + } + }) + } +} + +func TestFromSecret_Success(t *testing.T) { + _, keys, pub, err := test.GenerateKeys(elliptic.P256(), crypto.SHA256) + if err != nil { + t.Fatalf("failed to generate keys: %v", err) + } + secretData := &v1.Secret{ + Data: map[string][]byte{ + "data": pub, + }, + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "secret", + Namespace: namespace}} + + k8sclient := fakek8s.NewSimpleClientset(secretData) + + v, err := fromSecret(context.Background(), fmt.Sprintf("k8s://%s/secret", namespace), crypto.SHA256, k8sclient) + checkVerifier(t, keys, v) + if err != nil { + t.Errorf("couldn't construct expected verifier from secret: %v", err) + } +} + +func TestFromSecret_Error(t *testing.T) { + secretNoData := &v1.Secret{ + Data: map[string][]byte{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-secret", + Namespace: namespace}} + secretMultipleData := &v1.Secret{ + Data: map[string][]byte{ + "data1": []byte("key"), + "data2": []byte("key"), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "multiple-data-secret", + Namespace: namespace}} + secretInvalidData := &v1.Secret{ + Data: map[string][]byte{ + "data1": []byte("key"), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-data-secret", + Namespace: namespace}} + + k8sclient := fakek8s.NewSimpleClientset(secretNoData, secretMultipleData, secretInvalidData) + + tcs := []struct { + name string + secretref string + expectedError error + }{{ + name: "no data in secret", + secretref: fmt.Sprintf("k8s://%s/empty-secret", namespace), + expectedError: ErrorEmptySecretData, + }, { + name: "multiple data in secret", + secretref: fmt.Sprintf("k8s://%s/multiple-data-secret", namespace), + expectedError: ErrorMultipleSecretData, + }, { + name: "invalid data in secret", + secretref: fmt.Sprintf("k8s://%s/invalid-data-secret", namespace), + expectedError: ErrorDecodeKey, + }, { + name: "invalid secretref", + secretref: "invalid-secretref", + expectedError: ErrorK8sSpecificationInvalid, + }, { + name: "secretref has k8s prefix but contains invalid data", + secretref: "k8s://ns/name/foo", + expectedError: ErrorK8sSpecificationInvalid, + }, { + name: "secret doesn't exist", + secretref: fmt.Sprintf("k8s://%s/not-exist-secret", namespace), + expectedError: ErrorSecretNotFound, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + _, err := fromSecret(context.Background(), tc.secretref, crypto.SHA256, k8sclient) + if !errors.Is(err, tc.expectedError) { + t.Errorf("FromSecret got: %v, want: %v", err, tc.expectedError) + } + }) + } +} + +func TestFromData_Error(t *testing.T) { + _, _, pub, err := test.GenerateKeys(elliptic.P256(), crypto.SHA256) + if err != nil { + t.Fatalf("failed to generate keys %v", err) + } + tcs := []struct { + name string + data []byte + algorithm crypto.Hash + expectedError error + }{{ + name: "data in cannot be decoded", + data: []byte("wrong key"), + algorithm: crypto.SHA256, + expectedError: ErrorDecodeKey, + }, { + name: "verifier cannot be loaded due to wrong algorithm", + data: pub, + algorithm: crypto.BLAKE2b_256, + expectedError: ErrorLoadVerifier, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + _, err := fromData(tc.data, tc.algorithm) + if !errors.Is(err, tc.expectedError) { + t.Errorf("fromData got: %v, want: %v", err, tc.expectedError) + } + }) + } +} + +func TestMatchHashAlgorithm_Success(t *testing.T) { + tcs := []struct { + name string + algorithm v1alpha1.HashAlgorithm + want crypto.Hash + }{{ + name: "correct", + algorithm: "SHA256", + want: crypto.SHA256, + }, { + name: "lower case", + algorithm: "Sha256", + want: crypto.SHA256, + }, { + name: "empty should be defaulted to SHA256", + algorithm: "", + want: crypto.SHA256, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + got, err := matchHashAlgorithm(tc.algorithm) + if err != nil { + t.Errorf("failed to get hash 
algorithm: %v", err) + } + if d := cmp.Diff(tc.want, got); d != "" { + t.Error(diff.PrintWantGot(d)) + } + }) + } +} + +func TestHashAlgorithm_Error(t *testing.T) { + _, err := matchHashAlgorithm("SHA1") + if !errors.Is(err, ErrorAlgorithmInvalid) { + t.Errorf("hashAlgorithm got: %v, want: %v", err, ErrorAlgorithmInvalid) + } +} + +// checkVerifier checks if the keys public key is equal to the verifier's public key +func checkVerifier(t *testing.T, keys *ecdsa.PrivateKey, verifier signature.Verifier) { + t.Helper() + p, _ := verifier.PublicKey() + if !keys.PublicKey.Equal(p) { + t.Errorf("got wrong verifier %v", verifier) + } +} diff --git a/pkg/trustedresources/verify.go b/pkg/trustedresources/verify.go index c3aa52853b8..8606f6877f3 100644 --- a/pkg/trustedresources/verify.go +++ b/pkg/trustedresources/verify.go @@ -19,21 +19,17 @@ package trustedresources import ( "bytes" "context" - "crypto" "crypto/sha256" "encoding/base64" "encoding/json" + "errors" "fmt" - "os" - "path/filepath" - "strings" + "regexp" - "github.com/pkg/errors" - "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" - "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - v1 "k8s.io/api/core/v1" + "github.com/tektoncd/pipeline/pkg/trustedresources/verifier" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -41,29 +37,11 @@ import ( const ( // SignatureAnnotation is the key of signature in annotation map SignatureAnnotation = "tekton.dev/signature" - // keyReference is the prefix of secret reference - keyReference = "k8s://" ) -// VerifyInterface get the checksum of json marshalled object and verify it. -func VerifyInterface(obj interface{}, verifier signature.Verifier, signature []byte) error { - ts, err := json.Marshal(obj) - if err != nil { - return err - } - - h := sha256.New() - h.Write(ts) - - if err := verifier.VerifySignature(bytes.NewReader(signature), bytes.NewReader(h.Sum(nil))); err != nil { - return err - } - - return nil -} - -// VerifyTask verifies the signature and public key against task -func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject, k8s kubernetes.Interface) error { +// VerifyTask verifies the signature and public key against task. +// source is from ConfigSource.URI, which will be used to match policy patterns. k8s is used to fetch secret from cluster +func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject, k8s kubernetes.Interface, source string, policies []*v1alpha1.VerificationPolicy) error { tm, signature, err := prepareObjectMeta(taskObj.TaskMetadata()) if err != nil { return err @@ -75,20 +53,13 @@ func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject, k8s kubernetes. ObjectMeta: tm, Spec: taskObj.TaskSpec(), } - verifiers, err := getVerifiers(ctx, k8s) - if err != nil { - return err - } - for _, verifier := range verifiers { - if err := VerifyInterface(task, verifier, signature); err == nil { - return nil - } - } - return fmt.Errorf("Task %s in namespace %s fails verification", task.Name, task.Namespace) + + return verifyResource(ctx, &task, k8s, signature, source, policies) } -// VerifyPipeline verifies the signature and public key against pipeline -func VerifyPipeline(ctx context.Context, pipelineObj v1beta1.PipelineObject, k8s kubernetes.Interface) error { +// VerifyPipeline verifies the signature and public key against pipeline. 
+// source is from ConfigSource.URI, which will be used to match policy patterns; k8s is used to fetch secrets from the cluster +func VerifyPipeline(ctx context.Context, pipelineObj v1beta1.PipelineObject, k8s kubernetes.Interface, source string, policies []*v1alpha1.VerificationPolicy) error { pm, signature, err := prepareObjectMeta(pipelineObj.PipelineMetadata()) if err != nil { return err @@ -100,18 +71,88 @@ func VerifyPipeline(ctx context.Context, pipelineObj v1beta1.PipelineObject, k8s ObjectMeta: pm, Spec: pipelineObj.PipelineSpec(), } - verifiers, err := getVerifiers(ctx, k8s) - if err != nil { - return err + + return verifyResource(ctx, &pipeline, k8s, signature, source, policies) +} + +// verifyResource verifies a resource that implements metav1.Object using the provided signature and the public keys from the configmap or policies. +// It fetches public keys from the configmap first; if no keys are found there, it falls back to the keys from the VerificationPolicies. +// For VerificationPolicies, verifyResource applies the following rules: +// 1. For each policy, check if the resource URL matches any of the `patterns` in the `resources` list. If it matches, this policy will be used for verification. +// 2. If multiple policies are matched, the resource needs to pass all of them to pass verification. +// 3. To pass one policy, the resource can be verified by any of the public keys in the policy. +func verifyResource(ctx context.Context, resource metav1.Object, k8s kubernetes.Interface, signature []byte, source string, policies []*v1alpha1.VerificationPolicy) error { + verifiers, err := verifier.FromConfigMap(ctx, k8s) + if err != nil && !errors.Is(err, verifier.ErrorEmptyPublicKeys) { + return fmt.Errorf("failed to get verifiers from configmap: %w", err) + } + if len(verifiers) != 0 { + for _, verifier := range verifiers { + // if one of the verifiers passes verification, then this resource passes verification + if err := verifyInterface(resource, verifier, signature); err == nil { + return nil + } + } + return fmt.Errorf("%w: resource %s in namespace %s fails verification", ErrorResourceVerificationFailed, resource.GetName(), resource.GetNamespace()) } - for _, verifier := range verifiers { - if err := VerifyInterface(pipeline, verifier, signature); err == nil { - return nil + if len(policies) == 0 { + return ErrorEmptyVerificationConfig + } + + matchedPolicies := []*v1alpha1.VerificationPolicy{} + for _, p := range policies { + for _, r := range p.Spec.Resources { + matching, err := regexp.MatchString(r.Pattern, source) + if err != nil { + return fmt.Errorf("%v: %w", err, ErrorRegexMatch) + } + if matching { + matchedPolicies = append(matchedPolicies, p) + break + } } } + if len(matchedPolicies) == 0 { + return fmt.Errorf("%w: no matching policies are found for resource: %s against source: %s", ErrorNoMatchedPolicies, resource.GetName(), source) + } - return fmt.Errorf("Pipeline %s in namespace %s fails verification", pipeline.Name, pipeline.Namespace) + for _, p := range matchedPolicies { + passVerification := false + verifiers, err := verifier.FromPolicy(ctx, k8s, p) + if err != nil { + return fmt.Errorf("failed to get verifiers from policy: %w", err) + } + for _, verifier := range verifiers { + // if one of the verifiers passes verification, then this policy passes verification + if err := verifyInterface(resource, verifier, signature); err == nil { + passVerification = true + break + } + } + // if this policy fails verification, return the error directly; no need to check other policies + if !passVerification { + return fmt.Errorf("%w: resource %s in namespace %s fails verification", ErrorResourceVerificationFailed, resource.GetName(), resource.GetNamespace()) + } + } + return nil +} + +// verifyInterface gets the checksum of the JSON-marshalled object and verifies it. +func verifyInterface(obj interface{}, verifier signature.Verifier, signature []byte) error { + ts, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("failed to marshal the object: %w", err) + } + + h := sha256.New() + h.Write(ts) + + if err := verifier.VerifySignature(bytes.NewReader(signature), bytes.NewReader(h.Sum(nil))); err != nil { + return fmt.Errorf("%w: %v", ErrorResourceVerificationFailed, err.Error()) + } + + return nil } // prepareObjectMeta will remove annotations not configured from user side -- "kubectl-client-side-apply" and "kubectl.kubernetes.io/last-applied-configuration" @@ -145,7 +186,7 @@ func prepareObjectMeta(in metav1.ObjectMeta) (metav1.ObjectMeta, []byte, error) // signature should be contained in annotation sig, ok := in.Annotations[SignatureAnnotation] if !ok { - return out, nil, fmt.Errorf("signature is missing") + return out, nil, ErrorSignatureMissing } // extract signature signature, err := base64.StdEncoding.DecodeString(sig) @@ -156,86 +197,3 @@ func prepareObjectMeta(in metav1.ObjectMeta) (metav1.ObjectMeta, []byte, error) return out, signature, nil } - -// getVerifiers get all verifiers from configmap -func getVerifiers(ctx context.Context, k8s kubernetes.Interface) ([]signature.Verifier, error) { - cfg := config.FromContextOrDefaults(ctx) - verifiers := []signature.Verifier{} - // TODO(#5527): consider using k8s://namespace/name instead of mounting files. - for key := range cfg.TrustedResources.Keys { - v, err := verifierForKeyRef(ctx, key, crypto.SHA256, k8s) - if err == nil { - verifiers = append(verifiers, v...) - } - } - if len(verifiers) == 0 { - return verifiers, fmt.Errorf("no public keys are founded for verification") - } - - return verifiers, nil -} - -// verifierForKeyRef parses the given keyRef, loads the key and returns an appropriate -// verifier using the provided hash algorithm -// TODO(#5527): consider wrap verifiers to resolver so the same verifiers are used for the same reconcile event -func verifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (verifiers []signature.Verifier, err error) { - var raw []byte - verifiers = []signature.Verifier{} - // if the ref is secret then we fetch the keys from the secrets - if strings.HasPrefix(keyRef, keyReference) { - s, err := getKeyPairSecret(ctx, keyRef, k8s) - if err != nil { - return nil, err - } - for _, raw := range s.Data { - pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) - if err != nil { - return nil, fmt.Errorf("pem to public key: %w", err) - } - v, _ := signature.LoadVerifier(pubKey, hashAlgorithm) - verifiers = append(verifiers, v) - } - if len(verifiers) == 0 { - return verifiers, fmt.Errorf("no public keys are founded for verification") - } - return verifiers, nil - } - // read the key from mounted file - raw, err = os.ReadFile(filepath.Clean(keyRef)) - if err != nil { - return nil, err - } - - // PEM encoded file.
- pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) - if err != nil { - return nil, fmt.Errorf("pem to public key: %w", err) - } - v, _ := signature.LoadVerifier(pubKey, hashAlgorithm) - verifiers = append(verifiers, v) - - return verifiers, nil -} - -func getKeyPairSecret(ctx context.Context, k8sRef string, k8s kubernetes.Interface) (*v1.Secret, error) { - namespace, name, err := parseRef(k8sRef) - if err != nil { - return nil, err - } - - var s *v1.Secret - if s, err = k8s.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil { - return nil, errors.Wrap(err, "checking if secret exists") - } - - return s, nil -} - -// the reference should be formatted as / -func parseRef(k8sRef string) (string, string, error) { - s := strings.Split(strings.TrimPrefix(k8sRef, keyReference), "/") - if len(s) != 2 { - return "", "", errors.New("kubernetes specification should be in the format k8s:///") - } - return s[0], s[1], nil -} diff --git a/pkg/trustedresources/verify_test.go b/pkg/trustedresources/verify_test.go index 0191e900f57..1c78275d25a 100644 --- a/pkg/trustedresources/verify_test.go +++ b/pkg/trustedresources/verify_test.go @@ -18,22 +18,22 @@ package trustedresources import ( "context" + "crypto" + "crypto/elliptic" "encoding/base64" - "fmt" - "io/ioutil" - "path/filepath" + "errors" "testing" "github.com/google/go-cmp/cmp" "github.com/sigstore/sigstore/pkg/signature" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/trustedresources/verifier" test "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" "go.uber.org/zap/zaptest" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - fakek8s "k8s.io/client-go/kubernetes/fake" "knative.dev/pkg/logging" ) @@ -42,7 +42,6 @@ const ( ) func TestVerifyInterface_Task_Success(t *testing.T) { - // get signerverifer sv, _, err := signature.NewDefaultECDSASignerVerifier() if err != nil { t.Fatalf("failed to get signerverifier %v", err) @@ -64,7 +63,7 @@ func TestVerifyInterface_Task_Success(t *testing.T) { } } - err = VerifyInterface(signedTask, sv, signature) + err = verifyInterface(signedTask, sv, signature) if err != nil { t.Fatalf("VerifyInterface() get err %v", err) } @@ -72,7 +71,6 @@ func TestVerifyInterface_Task_Success(t *testing.T) { } func TestVerifyInterface_Task_Error(t *testing.T) { - // get signerverifer sv, _, err := signature.NewDefaultECDSASignerVerifier() if err != nil { t.Fatalf("failed to get signerverifier %v", err) @@ -89,24 +87,22 @@ func TestVerifyInterface_Task_Error(t *testing.T) { tamperedTask.Name = "tampered" tcs := []struct { - name string - task *v1beta1.Task - expectedErr error + name string + task *v1beta1.Task + expectedError error }{{ - name: "Unsigned Task Fail Verification", - task: unsignedTask, - expectedErr: fmt.Errorf("invalid signature when validating ASN.1 encoded signature"), + name: "Unsigned Task Fail Verification", + task: unsignedTask, + expectedError: ErrorResourceVerificationFailed, }, { - name: "Empty task Fail Verification", - task: nil, - expectedErr: fmt.Errorf("invalid signature when validating ASN.1 encoded signature"), + name: "Empty task Fail Verification", + task: nil, + expectedError: ErrorResourceVerificationFailed, }, { - name: "Tampered task Fail Verification", - task: tamperedTask, - expectedErr: fmt.Errorf("invalid signature when validating ASN.1 encoded signature"), - }, - } 
- + name: "Tampered task Fail Verification", + task: tamperedTask, + expectedError: ErrorResourceVerificationFailed, + }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { signature := []byte{} @@ -121,27 +117,21 @@ func TestVerifyInterface_Task_Error(t *testing.T) { } } - err := VerifyInterface(tc.task, sv, signature) - if err == nil { - t.Fatalf("verifyTaskRun() expects to get err but got nil") - } - if (err != nil) && (err.Error() != tc.expectedErr.Error()) { - t.Fatalf("VerifyInterface() get err %v, wantErr %t", err, tc.expectedErr) + err := verifyInterface(tc.task, sv, signature) + if !errors.Is(err, tc.expectedError) { + t.Errorf("verifyInterface got: %v, want: %v", err, tc.expectedError) } }) } } -func TestVerifyTask_Success(t *testing.T) { +func TestVerifyTask_Configmap_Success(t *testing.T) { ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) - signer, keypath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } + signer, keypath := test.GetSignerFromFile(ctx, t) - ctx = test.SetupTrustedResourceConfig(ctx, keypath, config.EnforceResourceVerificationMode) + ctx = test.SetupTrustedResourceKeyConfig(ctx, keypath, config.EnforceResourceVerificationMode) unsignedTask := test.GetUnsignedTask("test-task") @@ -150,22 +140,17 @@ func TestVerifyTask_Success(t *testing.T) { t.Fatal("fail to sign task", err) } - err = VerifyTask(ctx, signedTask, nil) + err = VerifyTask(ctx, signedTask, nil, "", []*v1alpha1.VerificationPolicy{}) if err != nil { - t.Fatalf("verifyTaskRun() get err %v", err) + t.Errorf("VerifyTask() get err %v", err) } } -func TestVerifyTask_Error(t *testing.T) { +func TestVerifyTask_Configmap_Error(t *testing.T) { ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) - signer, keypath, err := test.GetSignerFromFile(ctx, t) - if err != nil { - t.Fatal(err) - } - - ctx = test.SetupTrustedResourceConfig(ctx, keypath, config.EnforceResourceVerificationMode) + signer, keypath := test.GetSignerFromFile(ctx, t) unsignedTask := test.GetUnsignedTask("test-task") @@ -178,156 +163,279 @@ func TestVerifyTask_Error(t *testing.T) { tamperedTask.Annotations["random"] = "attack" tcs := []struct { - name string - task v1beta1.TaskObject + name string + task v1beta1.TaskObject + keypath string + expectedError error }{{ - name: "Tampered Task Fails Verification with tampered content", - task: tamperedTask, + name: "modified Task fails verification", + task: tamperedTask, + keypath: keypath, + expectedError: ErrorResourceVerificationFailed, }, { - name: "Unsigned Task Fails Verification without signature", - task: unsignedTask, + name: "unsigned Task fails verification", + task: unsignedTask, + keypath: keypath, + expectedError: ErrorSignatureMissing, + }, { + name: "fail to load key from configmap", + task: signedTask, + keypath: "wrongPath", + expectedError: verifier.ErrorFailedLoadKeyFile, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - err := VerifyTask(ctx, tc.task, nil) - if err == nil { - t.Fatalf("verifyTaskRun() expects to get err but got nil") + ctx = test.SetupTrustedResourceKeyConfig(ctx, tc.keypath, config.EnforceResourceVerificationMode) + err := VerifyTask(ctx, tc.task, nil, "", []*v1alpha1.VerificationPolicy{}) + if !errors.Is(err, tc.expectedError) { + t.Errorf("VerifyTask got: %v, want: %v", err, tc.expectedError) } }) } } -func TestVerifyPipeline_Success(t *testing.T) { +func TestVerifyTask_VerificationPolicy_Success(t *testing.T) { ctx := 
logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) + ctx = test.SetupTrustedResourceConfig(ctx, config.EnforceResourceVerificationMode) + signer256, _, k8sclient, vps := test.SetupVerificationPolicies(t) - signer, keypath, err := test.GetSignerFromFile(ctx, t) + unsignedTask := test.GetUnsignedTask("test-task") + + signedTask, err := test.GetSignedTask(unsignedTask, signer256, "signed") if err != nil { - t.Fatal(err) + t.Fatal("fail to sign task", err) } - ctx = test.SetupTrustedResourceConfig(ctx, keypath, config.EnforceResourceVerificationMode) + signer384, _, pub, err := test.GenerateKeys(elliptic.P384(), crypto.SHA384) + if err != nil { + t.Fatalf("failed to generate keys %v", err) + } - unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") + sha384Vp := &v1alpha1.VerificationPolicy{ + TypeMeta: metav1.TypeMeta{ + Kind: "VerificationPolicy", + APIVersion: "v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "differentAlgo", + Namespace: namespace, + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{ + {Pattern: "gcr.io/tekton-releases/catalog/upstream/sha384"}, + }, + Authorities: []v1alpha1.Authority{ + { + Name: "sha384Key", + Key: &v1alpha1.KeyRef{ + Data: string(pub), + HashAlgorithm: "sha384", + }, + }, + }, + }, + } + vps = append(vps, sha384Vp) - signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "signed") + signedTask384, err := test.GetSignedTask(unsignedTask, signer384, "signed384") if err != nil { t.Fatal("fail to sign task", err) } - err = VerifyPipeline(ctx, signedPipeline, nil) - if err != nil { - t.Fatalf("VerifyPipeline() get err %v", err) - } + tcs := []struct { + name string + task v1beta1.TaskObject + source string + signer signature.SignerVerifier + }{{ + name: "signed git source task passes verification", + task: signedTask, + source: "git+https://github.com/tektoncd/catalog.git", + }, { + name: "signed bundle source task passes verification", + task: signedTask, + source: "gcr.io/tekton-releases/catalog/upstream/git-clone", + }, { + name: "signed task with sha384 key", + task: signedTask384, + source: "gcr.io/tekton-releases/catalog/upstream/sha384", + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + err := VerifyTask(ctx, tc.task, k8sclient, tc.source, vps) + if err != nil { + t.Fatalf("VerifyTask() get err %v", err) + } + }) + } } -func TestVerifyPipeline_Error(t *testing.T) { +func TestVerifyTask_VerificationPolicy_Error(t *testing.T) { ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) + ctx = test.SetupTrustedResourceConfig(ctx, config.EnforceResourceVerificationMode) + sv, _, k8sclient, vps := test.SetupVerificationPolicies(t) - signer, keypath, err := test.GetSignerFromFile(ctx, t) + unsignedTask := test.GetUnsignedTask("test-task") + + signedTask, err := test.GetSignedTask(unsignedTask, sv, "signed") if err != nil { - t.Fatal(err) + t.Fatal("fail to sign task", err) + } + + tamperedTask := signedTask.DeepCopy() + tamperedTask.Annotations["random"] = "attack" + + tcs := []struct { + name string + task v1beta1.TaskObject + source string + verificationPolicy []*v1alpha1.VerificationPolicy + expectedError error + }{{ + name: "modified Task fails verification", + task: tamperedTask, + source: "git+https://github.com/tektoncd/catalog.git", + verificationPolicy: vps, + expectedError: ErrorResourceVerificationFailed, + }, { + name: "task not matching pattern fails verification", + task: signedTask, + source: "wrong source", + 
verificationPolicy: vps, + expectedError: ErrorNoMatchedPolicies, + }, { + name: "verification fails with empty policy", + task: tamperedTask, + source: "git+https://github.com/tektoncd/catalog.git", + verificationPolicy: []*v1alpha1.VerificationPolicy{}, + expectedError: ErrorEmptyVerificationConfig, + }, { + name: "Verification fails with regex error", + task: signedTask, + source: "git+https://github.com/tektoncd/catalog.git", + verificationPolicy: []*v1alpha1.VerificationPolicy{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{ + Pattern: "^[", + }}, + }, + }, + }, + expectedError: ErrorRegexMatch, + }, { + name: "Verification fails with error from policy", + task: signedTask, + source: "git+https://github.com/tektoncd/catalog.git", + verificationPolicy: []*v1alpha1.VerificationPolicy{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vp", + }, + Spec: v1alpha1.VerificationPolicySpec{ + Resources: []v1alpha1.ResourcePattern{{ + Pattern: ".*", + }}, + Authorities: []v1alpha1.Authority{ + { + Name: "foo", + Key: &v1alpha1.KeyRef{ + Data: "inline_key", + }, + }, + }, + }, + }, + }, + expectedError: verifier.ErrorDecodeKey, + }} + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + err := VerifyTask(ctx, tc.task, k8sclient, tc.source, tc.verificationPolicy) + if !errors.Is(err, tc.expectedError) { + t.Errorf("VerifyTask got: %v, want: %v", err, tc.expectedError) + } + }) } +} - ctx = test.SetupTrustedResourceConfig(ctx, keypath, config.EnforceResourceVerificationMode) +func TestVerifyPipeline_Success(t *testing.T) { + ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) + ctx = test.SetupTrustedResourceConfig(ctx, config.EnforceResourceVerificationMode) + sv, _, k8sclient, vps := test.SetupVerificationPolicies(t) unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") - signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, signer, "signed") + signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, sv, "signed") if err != nil { t.Fatal("fail to sign task", err) } - tamperedPipeline := signedPipeline.DeepCopy() - tamperedPipeline.Annotations["random"] = "attack" - tcs := []struct { name string pipeline v1beta1.PipelineObject + source string }{{ - name: "Tampered Pipeline Fails Verification with tampered content", - pipeline: tamperedPipeline, + name: "Signed git source Task Passes Verification", + pipeline: signedPipeline, + source: "git+https://github.com/tektoncd/catalog.git", }, { - name: "Unsigned Pipeline Fails Verification without signature", - pipeline: unsignedPipeline, - }, - } - + name: "Signed bundle source Task Passes Verification", + pipeline: signedPipeline, + source: "gcr.io/tekton-releases/catalog/upstream/git-clone", + }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - err := VerifyPipeline(ctx, tc.pipeline, nil) - if err == nil { - t.Fatalf("VerifyPipeline() expects to get err but got nil") + err := VerifyPipeline(ctx, tc.pipeline, k8sclient, tc.source, vps) + if err != nil { + t.Fatalf("VerifyPipeline() get err: %v", err) } }) } - } -func TestVerifyTask_SecretRef(t *testing.T) { +func TestVerifyPipeline_Error(t *testing.T) { ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) + ctx = test.SetupTrustedResourceConfig(ctx, config.EnforceResourceVerificationMode) + sv, _, k8sclient, vps := test.SetupVerificationPolicies(t) - signer, keypath, err := test.GetSignerFromFile(ctx, t) - if err != 
nil { - t.Fatal(err) - } - fileBytes, err := ioutil.ReadFile(filepath.Clean(keypath)) - if err != nil { - t.Fatal(err) - } - - secret := &v1.Secret{ - Data: map[string][]byte{"cosign.pub": fileBytes}, - ObjectMeta: metav1.ObjectMeta{ - Name: "verification-secrets", - Namespace: "default"}} - kubeclient := fakek8s.NewSimpleClientset(secret) - - secretref := fmt.Sprintf("%sdefault/verification-secrets", keyReference) - - ctx = test.SetupTrustedResourceConfig(ctx, secretref, config.EnforceResourceVerificationMode) - - unsignedTask := test.GetUnsignedTask("test-task") + unsignedPipeline := test.GetUnsignedPipeline("test-pipeline") - signedTask, err := test.GetSignedTask(unsignedTask, signer, "signed") + signedPipeline, err := test.GetSignedPipeline(unsignedPipeline, sv, "signed") if err != nil { t.Fatal("fail to sign task", err) } - - tamperedTask := signedTask.DeepCopy() - tamperedTask.Annotations["random"] = "attack" + tamperedPipeline := signedPipeline.DeepCopy() + tamperedPipeline.Annotations["random"] = "attack" tcs := []struct { - name string - task v1beta1.TaskObject - wantErr bool + name string + pipeline v1beta1.PipelineObject + source string }{{ - name: "Signed Task Passes Verification", - task: signedTask, - wantErr: false, - }, { - name: "Tampered Task Fails Verification with tampered content", - task: tamperedTask, - wantErr: true, + name: "Tampered Task Fails Verification with tampered content", + pipeline: tamperedPipeline, + source: "git+https://github.com/tektoncd/catalog.git", }, { - name: "Unsigned Task Fails Verification without signature", - task: unsignedTask, - wantErr: true, - }, - } - + name: "Task Not Matching Pattern Fails Verification", + pipeline: signedPipeline, + source: "wrong source", + }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - err := VerifyTask(ctx, tc.task, kubeclient) - if (err != nil) != tc.wantErr { - t.Fatalf("verifyTaskRun() get err %v, wantErr %t", err, tc.wantErr) + err := VerifyPipeline(ctx, tc.pipeline, k8sclient, tc.source, vps) + if err == nil { + t.Fatalf("VerifyPipeline() expects to get err but got nil") } }) } - } func TestPrepareObjectMeta(t *testing.T) { diff --git a/test/clients.go b/test/clients.go index 1b19a292837..39a5c561daa 100644 --- a/test/clients.go +++ b/test/clients.go @@ -58,19 +58,20 @@ import ( type clients struct { KubeClient kubernetes.Interface - V1beta1PipelineClient v1beta1.PipelineInterface - V1beta1ClusterTaskClient v1beta1.ClusterTaskInterface - V1beta1TaskClient v1beta1.TaskInterface - V1beta1TaskRunClient v1beta1.TaskRunInterface - V1beta1PipelineRunClient v1beta1.PipelineRunInterface - V1beta1CustomRunClient v1beta1.CustomRunInterface - V1alpha1PipelineResourceClient resourcev1alpha1.PipelineResourceInterface - V1alpha1RunClient v1alpha1.RunInterface - V1alpha1ResolutionRequestclient resolutionv1alpha1.ResolutionRequestInterface - V1PipelineClient v1.PipelineInterface - V1TaskClient v1.TaskInterface - V1TaskRunClient v1.TaskRunInterface - V1PipelineRunClient v1.PipelineRunInterface + V1beta1PipelineClient v1beta1.PipelineInterface + V1beta1ClusterTaskClient v1beta1.ClusterTaskInterface + V1beta1TaskClient v1beta1.TaskInterface + V1beta1TaskRunClient v1beta1.TaskRunInterface + V1beta1PipelineRunClient v1beta1.PipelineRunInterface + V1beta1CustomRunClient v1beta1.CustomRunInterface + V1alpha1PipelineResourceClient resourcev1alpha1.PipelineResourceInterface + V1alpha1RunClient v1alpha1.RunInterface + V1alpha1ResolutionRequestclient resolutionv1alpha1.ResolutionRequestInterface + 
V1alpha1VerificationPolicyClient v1alpha1.VerificationPolicyInterface + V1PipelineClient v1.PipelineInterface + V1TaskClient v1.TaskInterface + V1TaskRunClient v1.TaskRunInterface + V1PipelineRunClient v1.PipelineRunInterface } // newClients instantiates and returns several clientsets required for making requests to the @@ -113,6 +114,7 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client c.V1alpha1PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) c.V1alpha1RunClient = cs.TektonV1alpha1().Runs(namespace) c.V1alpha1ResolutionRequestclient = rrcs.ResolutionV1alpha1().ResolutionRequests(namespace) + c.V1alpha1VerificationPolicyClient = cs.TektonV1alpha1().VerificationPolicies(namespace) c.V1PipelineClient = cs.TektonV1().Pipelines(namespace) c.V1TaskClient = cs.TektonV1().Tasks(namespace) c.V1TaskRunClient = cs.TektonV1().TaskRuns(namespace) diff --git a/test/controller.go b/test/controller.go index ee18bc53345..bcea1ed5e4d 100644 --- a/test/controller.go +++ b/test/controller.go @@ -33,6 +33,7 @@ import ( informersv1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" fakeruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/run/fake" + fakeverificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake" fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/fake" fakecustomruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/fake" fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline/fake" @@ -86,6 +87,7 @@ type Data struct { LimitRange []*corev1.LimitRange ResolutionRequests []*resolutionv1alpha1.ResolutionRequest ExpectedCloudEventCount int + VerificationPolicies []*v1alpha1.VerificationPolicy } // Clients holds references to clients which are useful for reconciler tests. @@ -99,19 +101,20 @@ type Clients struct { // Informers holds references to informers which are useful for reconciler tests. 
type Informers struct { - PipelineRun informersv1beta1.PipelineRunInformer - Pipeline informersv1beta1.PipelineInformer - TaskRun informersv1beta1.TaskRunInformer - Run informersv1alpha1.RunInformer - CustomRun informersv1beta1.CustomRunInformer - Task informersv1beta1.TaskInformer - ClusterTask informersv1beta1.ClusterTaskInformer - PipelineResource resourceinformersv1alpha1.PipelineResourceInformer - Pod coreinformers.PodInformer - ConfigMap coreinformers.ConfigMapInformer - ServiceAccount coreinformers.ServiceAccountInformer - LimitRange coreinformers.LimitRangeInformer - ResolutionRequest resolutioninformersv1alpha1.ResolutionRequestInformer + PipelineRun informersv1beta1.PipelineRunInformer + Pipeline informersv1beta1.PipelineInformer + TaskRun informersv1beta1.TaskRunInformer + Run informersv1alpha1.RunInformer + CustomRun informersv1beta1.CustomRunInformer + Task informersv1beta1.TaskInformer + ClusterTask informersv1beta1.ClusterTaskInformer + PipelineResource resourceinformersv1alpha1.PipelineResourceInformer + Pod coreinformers.PodInformer + ConfigMap coreinformers.ConfigMapInformer + ServiceAccount coreinformers.ServiceAccountInformer + LimitRange coreinformers.LimitRangeInformer + ResolutionRequest resolutioninformersv1alpha1.ResolutionRequestInformer + VerificationPolicy informersv1alpha1.VerificationPolicyInformer } // Assets holds references to the controller, logs, clients, and informers. @@ -183,19 +186,20 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers PrependResourceVersionReactor(&c.Pipeline.Fake) i := Informers{ - PipelineRun: fakepipelineruninformer.Get(ctx), - Pipeline: fakepipelineinformer.Get(ctx), - TaskRun: faketaskruninformer.Get(ctx), - Run: fakeruninformer.Get(ctx), - CustomRun: fakecustomruninformer.Get(ctx), - Task: faketaskinformer.Get(ctx), - ClusterTask: fakeclustertaskinformer.Get(ctx), - PipelineResource: fakeresourceinformer.Get(ctx), - Pod: fakefilteredpodinformer.Get(ctx, v1beta1.ManagedByLabelKey), - ConfigMap: fakeconfigmapinformer.Get(ctx), - ServiceAccount: fakeserviceaccountinformer.Get(ctx), - LimitRange: fakelimitrangeinformer.Get(ctx), - ResolutionRequest: fakeresolutionrequestinformer.Get(ctx), + PipelineRun: fakepipelineruninformer.Get(ctx), + Pipeline: fakepipelineinformer.Get(ctx), + TaskRun: faketaskruninformer.Get(ctx), + Run: fakeruninformer.Get(ctx), + CustomRun: fakecustomruninformer.Get(ctx), + Task: faketaskinformer.Get(ctx), + ClusterTask: fakeclustertaskinformer.Get(ctx), + PipelineResource: fakeresourceinformer.Get(ctx), + Pod: fakefilteredpodinformer.Get(ctx, v1beta1.ManagedByLabelKey), + ConfigMap: fakeconfigmapinformer.Get(ctx), + ServiceAccount: fakeserviceaccountinformer.Get(ctx), + LimitRange: fakelimitrangeinformer.Get(ctx), + ResolutionRequest: fakeresolutionrequestinformer.Get(ctx), + VerificationPolicy: fakeverificationpolicyinformer.Get(ctx), } // Attach reactors that add resource mutations to the appropriate @@ -291,6 +295,14 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers t.Fatal(err) } } + + c.Pipeline.PrependReactor("*", "verificationpolicies", AddToInformer(t, i.VerificationPolicy.Informer().GetIndexer())) + for _, vp := range d.VerificationPolicies { + vp := vp.DeepCopy() // Avoid assumptions that the informer's copy is modified. 
+		if _, err := c.Pipeline.TektonV1alpha1().VerificationPolicies(vp.Namespace).Create(ctx, vp, metav1.CreateOptions{}); err != nil {
+			t.Fatal(err)
+		}
+	}
 	c.Pipeline.ClearActions()
 	c.Kube.ClearActions()
 	c.ResolutionRequests.ClearActions()
diff --git a/test/parse/yaml.go b/test/parse/yaml.go
index f78bf0f659e..4a81082e015 100644
--- a/test/parse/yaml.go
+++ b/test/parse/yaml.go
@@ -104,6 +104,16 @@ kind: PipelineResource
 	return &resource
 }
 
+// MustParseVerificationPolicy takes YAML and parses it into a *v1alpha1.VerificationPolicy
+func MustParseVerificationPolicy(t *testing.T, yaml string) *v1alpha1.VerificationPolicy {
+	var v v1alpha1.VerificationPolicy
+	yaml = `apiVersion: tekton.dev/v1alpha1
+kind: VerificationPolicy
+` + yaml
+	mustParseYAML(t, yaml, &v)
+	return &v
+}
+
 func mustParseYAML(t *testing.T, yaml string, i runtime.Object) {
 	if _, _, err := scheme.Codecs.UniversalDeserializer().Decode([]byte(yaml), nil, i); err != nil {
 		t.Fatalf("mustParseYAML (%s): %v", yaml, err)
diff --git a/test/trusted_resources_test.go b/test/trusted_resources_test.go
index 4c435e7cdfb..2c939d931ee 100644
--- a/test/trusted_resources_test.go
+++ b/test/trusted_resources_test.go
@@ -56,12 +56,12 @@ func init() {
 	os.Setenv("PRIVATE_PASSWORD", password)
 }
 
-func TestTrustedResourcesVerify_Success(t *testing.T) {
+func TestTrustedResourcesVerify_ConfigMap_Success(t *testing.T) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, requireAnyGate(neededFeatureFlags))
+	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, true, requireAnyGate(neededFeatureFlags))
 	knativetest.CleanupOnInterrupt(func() { removeResourceVerificationConfig(ctx, t, c, namespace, secretName) }, t.Logf)
 	defer removeResourceVerificationConfig(ctx, t, c, namespace, secretName)
@@ -137,12 +137,12 @@ spec:
 
 }
 
-func TestTrustedResourcesVerify_Error(t *testing.T) {
+func TestTrustedResourcesVerify_ConfigMap_Error(t *testing.T) {
 	ctx := context.Background()
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, requireAnyGate(neededFeatureFlags))
+	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, true, requireAnyGate(neededFeatureFlags))
 	knativetest.CleanupOnInterrupt(func() { removeResourceVerificationConfig(ctx, t, c, namespace, secretName) }, t.Logf)
 	defer removeResourceVerificationConfig(ctx, t, c, namespace, secretName)
@@ -223,13 +223,216 @@ spec:
 
 }
 
-func setupResourceVerificationConfig(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string, string, signature.Signer) {
+func TestTrustedResourcesVerify_VerificationPolicy_Success(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, false, requireAnyGate(neededFeatureFlags))
+	knativetest.CleanupOnInterrupt(func() { removeResourceVerificationConfig(ctx, t, c, namespace, secretName) }, t.Logf)
+	defer removeResourceVerificationConfig(ctx, t, c, namespace, secretName)
+
+	vp := parse.MustParseVerificationPolicy(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  resources:
+  - pattern: ".*"
+  authorities:
+  - name: key1
+    key:
+      secretRef:
+        name: %s
+        namespace: %s
+`, helpers.ObjectNameForTest(t), namespace, secretName, namespace))
+
+	if _, err := c.V1alpha1VerificationPolicyClient.Create(ctx, vp, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create VerificationPolicy: %s", err)
+	}
+
+	fqImageName := getTestImage(busyboxImage)
+	task := parse.MustParseV1beta1Task(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  steps:
+  - image: %s
+    command: ['/bin/sh']
+    args: ['-c', 'echo hello']
+`, helpers.ObjectNameForTest(t), namespace, fqImageName))
+
+	signedTask, err := GetSignedTask(task, signer, "signedtask")
+	if err != nil {
+		t.Errorf("error getting signed task: %v", err)
+	}
+	if _, err := c.V1beta1TaskClient.Create(ctx, signedTask, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Task: %s", err)
+	}
+
+	pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  tasks:
+  - name: task
+    taskRef:
+      name: %s
+      kind: Task
+`, helpers.ObjectNameForTest(t), namespace, signedTask.Name))
+
+	signedPipeline, err := GetSignedPipeline(pipeline, signer, "signedpipeline")
+	if err != nil {
+		t.Errorf("error getting signed pipeline: %v", err)
+	}
+
+	if _, err := c.V1beta1PipelineClient.Create(ctx, signedPipeline, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Pipeline: %s", err)
+	}
+
+	pr := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  pipelineRef:
+    name: %s
+`, helpers.ObjectNameForTest(t), namespace, signedPipeline.Name))
+
+	t.Logf("Creating PipelineRun %s", pr.Name)
+	if _, err := c.V1beta1PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create PipelineRun `%s`: %s", pr.Name, err)
+	}
+
+	t.Logf("Waiting for PipelineRun in namespace %s to succeed", namespace)
+	if err := WaitForPipelineRunState(ctx, c, pr.Name, timeout, PipelineRunSucceed(pr.Name), "PipelineRunSucceed", v1beta1Version); err != nil {
+		t.Errorf("Error waiting for PipelineRun to finish: %s", err)
+	}
+
+	pr, err = c.V1beta1PipelineRunClient.Get(ctx, pr.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Couldn't get expected PipelineRun %s: %s", pr.Name, err)
+	}
+
+	if pr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() {
+		t.Errorf("Expected PipelineRun to succeed but instead found condition: %s", pr.Status.GetCondition(apis.ConditionSucceeded))
+	}
+
+}
+
+func TestTrustedResourcesVerify_VerificationPolicy_Error(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	c, namespace, secretName, signer := setupResourceVerificationConfig(ctx, t, false, requireAnyGate(neededFeatureFlags))
+	knativetest.CleanupOnInterrupt(func() { removeResourceVerificationConfig(ctx, t, c, namespace, secretName) }, t.Logf)
+	defer removeResourceVerificationConfig(ctx, t, c, namespace, secretName)
+
+	vp := parse.MustParseVerificationPolicy(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  resources:
+  - pattern: ".*"
+  authorities:
+  - name: key1
+    key:
+      secretRef:
+        name: %s
+        namespace: %s
+`, helpers.ObjectNameForTest(t), namespace, secretName, namespace))
+
+	if _, err := c.V1alpha1VerificationPolicyClient.Create(ctx, vp, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create VerificationPolicy: %s", err)
+	}
+
+	fqImageName := getTestImage(busyboxImage)
+	task := parse.MustParseV1beta1Task(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  steps:
+  - image: %s
+    command: ['/bin/sh']
+    args: ['-c', 'echo hello']
+`, helpers.ObjectNameForTest(t), namespace, fqImageName))
+
+	signedTask, err := GetSignedTask(task, signer, "signedtask")
+	if err != nil {
+		t.Errorf("error getting signed task: %v", err)
+	}
+	// modify the task to fail the verification
+	signedTask.Annotations["foo"] = "bar"
+	if _, err := c.V1beta1TaskClient.Create(ctx, signedTask, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Task: %s", err)
+	}
+
+	pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  tasks:
+  - name: task
+    taskRef:
+      name: %s
+      kind: Task
+`, helpers.ObjectNameForTest(t), namespace, signedTask.Name))
+
+	signedPipeline, err := GetSignedPipeline(pipeline, signer, "signedpipeline")
+	if err != nil {
+		t.Errorf("error getting signed pipeline: %v", err)
+	}
+
+	if _, err := c.V1beta1PipelineClient.Create(ctx, signedPipeline, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create Pipeline: %s", err)
+	}
+
+	pr := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(`
+metadata:
+  name: %s
+  namespace: %s
+spec:
+  pipelineRef:
+    name: %s
+`, helpers.ObjectNameForTest(t), namespace, signedPipeline.Name))
+
+	t.Logf("Creating PipelineRun %s", pr.Name)
+	if _, err := c.V1beta1PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil {
+		t.Fatalf("Failed to create PipelineRun `%s`: %s", pr.Name, err)
+	}
+
+	t.Logf("Waiting for PipelineRun in namespace %s to fail", namespace)
+	if err := WaitForPipelineRunState(ctx, c, pr.Name, timeout, PipelineRunFailed(pr.Name), "PipelineRunFailed", v1beta1Version); err != nil {
+		t.Errorf("Error waiting for PipelineRun to finish: %s", err)
+	}
+
+	pr, err = c.V1beta1PipelineRunClient.Get(ctx, pr.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Couldn't get expected PipelineRun %s: %s", pr.Name, err)
+	}
+
+	if pr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
+		t.Errorf("Expected PipelineRun to fail but found condition: %s", pr.Status.GetCondition(apis.ConditionSucceeded))
+	}
+	if pr.Status.Conditions[0].Reason != pod.ReasonResourceVerificationFailed {
+		t.Errorf("Expected PipelineRun fail condition is: %s but got: %s", pod.ReasonResourceVerificationFailed, pr.Status.Conditions[0].Reason)
+	}
+
+}
+
+func setupResourceVerificationConfig(ctx context.Context, t *testing.T, keyInConfigMap bool, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string, string, signature.Signer) {
 	c, ns := setup(ctx, t, requireAnyGate(neededFeatureFlags))
-	secretName, signer := setSecretAndConfig(ctx, t, c.KubeClient)
+	secretName, signer := setSecretAndConfig(ctx, t, c.KubeClient, ns, keyInConfigMap)
 	return c, ns, secretName, signer
 }
 
-func setSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Interface) (string, signature.Signer) {
+func setSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Interface, namespace string, keyInConfigMap bool) (string, signature.Signer) {
 	t.Helper()
 	// Note that this may not work if we run e2e tests in parallel since this feature flag require all tasks and pipelines
 	// to be signed and unsigned resources will fail. i.e. Don't add t.Parallel() for this test.
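The new `keyInConfigMap` parameter is what separates the two e2e flavors above. A minimal sketch (not part of the patch, and only a fragment from inside an e2e test like the ones shown here) of how the two modes are meant to be driven, assuming the helper keeps the signature introduced in this diff:

```go
// ConfigMap mode: setSecretAndConfig also writes the public key reference into
// the config-trusted-resources ConfigMap in the system namespace.
c, ns, secretName, signer := setupResourceVerificationConfig(ctx, t, true, requireAnyGate(neededFeatureFlags))

// VerificationPolicy mode: the key stays in a Secret in the test namespace and
// is only picked up through a VerificationPolicy secretRef, as in the tests above.
c, ns, secretName, signer = setupResourceVerificationConfig(ctx, t, false, requireAnyGate(neededFeatureFlags))
```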
@@ -240,7 +443,6 @@ func setSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Int
 		t.Fatal(err)
 	}
 
-	// Config signer and public key
 	signer, err := signature.LoadSignerFromPEMFile(privKey, crypto.SHA256, getPass)
 	if err != nil {
 		t.Errorf("error getting signer from key file: %v", err)
@@ -251,30 +453,31 @@ func setSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Int
 		t.Fatal(err)
 	}
 
-	secret := &v1.Secret{Data: map[string][]byte{"cosign.pub": fileBytes}, ObjectMeta: metav1.ObjectMeta{Name: "verification-secrets", Namespace: system.Namespace()}}
+	secret := &v1.Secret{Data: map[string][]byte{"cosign.pub": fileBytes}, ObjectMeta: metav1.ObjectMeta{Name: "verification-secrets", Namespace: namespace}}
 
-	client.CoreV1().Secrets(system.Namespace()).Create(ctx, secret, metav1.CreateOptions{})
-	// Check if secret created
-	_, err = client.CoreV1().Secrets(system.Namespace()).Get(ctx, secret.Name, metav1.GetOptions{})
+	client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
+	_, err = client.CoreV1().Secrets(namespace).Get(ctx, secret.Name, metav1.GetOptions{})
 	if err != nil {
 		t.Error(err)
 		return "", nil
 	}
 
-	configMapData = map[string]string{
-		config.PublicKeys: fmt.Sprintf("k8s://%s/verification-secrets", system.Namespace()),
-	}
-	if err := updateConfigMap(ctx, client, system.Namespace(), config.GetTrustedResourcesConfigName(), configMapData); err != nil {
-		t.Fatal(err)
+	if keyInConfigMap {
+		configMapData = map[string]string{
+			config.PublicKeys: fmt.Sprintf("k8s://%s/verification-secrets", namespace),
+		}
+		if err := updateConfigMap(ctx, client, system.Namespace(), config.GetTrustedResourcesConfigName(), configMapData); err != nil {
+			t.Fatal(err)
+		}
 	}
 
 	return secret.Name, signer
 }
 
 func removeResourceVerificationConfig(ctx context.Context, t *testing.T, c *clients, namespace string, secretName string) {
-	resetSecretAndConfig(ctx, t, c.KubeClient, secretName)
+	resetSecretAndConfig(ctx, t, c.KubeClient, secretName, namespace)
 	tearDown(ctx, t, c, namespace)
 }
 
-func resetSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Interface, secretName string) {
+func resetSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.Interface, secretName, namespace string) {
 	t.Helper()
 	configMapData := map[string]string{
 		"resource-verification-mode": config.SkipResourceVerificationMode,
@@ -282,8 +485,13 @@ func resetSecretAndConfig(ctx context.Context, t *testing.T, client kubernetes.I
 	if err := updateConfigMap(ctx, client, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil {
 		t.Fatal(err)
 	}
-
-	err := client.CoreV1().Secrets(system.Namespace()).Delete(ctx, secretName, metav1.DeleteOptions{})
+	configMapData = map[string]string{
+		config.PublicKeys: "",
+	}
+	if err := updateConfigMap(ctx, client, system.Namespace(), config.GetTrustedResourcesConfigName(), configMapData); err != nil {
+		t.Fatal(err)
+	}
+	err := client.CoreV1().Secrets(namespace).Delete(ctx, secretName, metav1.DeleteOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/test/trustedresources.go b/test/trustedresources.go
index 779bc8e7536..403599ba717 100644
--- a/test/trustedresources.go
+++ b/test/trustedresources.go
@@ -34,13 +34,16 @@ import (
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
 	"github.com/tektoncd/pipeline/pkg/apis/config"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
 	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
-	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	fakek8s "k8s.io/client-go/kubernetes/fake"
 	"knative.dev/pkg/logging"
 )
 
+// TODO(#5820): refactor those into an internal pkg
 const (
 	namespace = "trusted-resources"
 	// signatureAnnotation is the key of signature in annotation map
@@ -92,8 +95,9 @@ func GetUnsignedPipeline(name string) *v1beta1.Pipeline {
 	}
 }
 
-// SetupTrustedResourceConfig config the keys and feature flag for testing
-func SetupTrustedResourceConfig(ctx context.Context, keypath string, resourceVerificationMode string) context.Context {
+// SetupTrustedResourceKeyConfig configures the given public key path in the config-trusted-resources
+// ConfigMap and sets the resource-verification-mode feature flag to the given resourceVerificationMode for testing
+func SetupTrustedResourceKeyConfig(ctx context.Context, keypath string, resourceVerificationMode string) context.Context {
 	store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
 	cm := &corev1.ConfigMap{
 		TypeMeta: metav1.TypeMeta{
@@ -109,7 +113,13 @@ func SetupTrustedResourceConfig(ctx context.Context, keypath string, resourceVer
 		},
 	}
 	store.OnConfigChanged(cm)
+	ctx = SetupTrustedResourceConfig(ctx, resourceVerificationMode)
+	return store.ToContext(ctx)
+}
 
+// SetupTrustedResourceConfig sets the resource-verification-mode feature flag to the given mode for testing
+func SetupTrustedResourceConfig(ctx context.Context, resourceVerificationMode string) context.Context {
+	store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
 	featureflags := &corev1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
@@ -124,44 +134,176 @@ func SetupTrustedResourceConfig(ctx context.Context, keypath string, resourceVer
 	return store.ToContext(ctx)
 }
 
-// GetSignerFromFile generates key files to tmpdir, return signer and pubkey path
-func GetSignerFromFile(ctx context.Context, t *testing.T) (signature.Signer, string, error) {
+// SetupVerificationPolicies sets up verification policies and a secret that stores the public keys.
+// This function helps to set up 3 kinds of VerificationPolicies:
+// 1. One public key in inline data
+// 2. One public key in a secret
+// 3. One wrong public key in inline data, with a pattern that doesn't match any resources
+// A SignerVerifier is returned to sign resources.
+// The k8s clientset is returned so the secret can be fetched from it.
+// The VerificationPolicies are returned so callers can fetch the public keys they reference.
+func SetupVerificationPolicies(t *testing.T) (signature.SignerVerifier, *ecdsa.PrivateKey, *fakek8s.Clientset, []*v1alpha1.VerificationPolicy) {
 	t.Helper()
+	sv, keys, pub, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
+	if err != nil {
+		t.Fatalf("failed to generate keys %v", err)
+	}
+	_, _, pub2, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
+	if err != nil {
+		t.Fatalf("failed to generate keys %v", err)
+	}
+	secret := &v1.Secret{
+		Data: map[string][]byte{"cosign.pub": pub},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "verification-secrets",
+			Namespace: namespace}}
+
+	keyInDataVp := getVerificationPolicy(
+		"keyInDataVp",
+		namespace,
+		[]v1alpha1.ResourcePattern{
+			{Pattern: "https://github.com/tektoncd/catalog.git"},
+		},
+		[]v1alpha1.Authority{
+			{
+				Name: "pubkey",
+				Key: &v1alpha1.KeyRef{
+					Data:          string(pub),
+					HashAlgorithm: "sha256",
+				},
+			},
+		})
+
+	keyInSecretVp := getVerificationPolicy(
+		"keyInSecretVp",
+		namespace,
+		[]v1alpha1.ResourcePattern{{
+			Pattern: "gcr.io/tekton-releases/catalog/upstream/git-clone"},
+		},
+		[]v1alpha1.Authority{
+			{
+				Name: "pubkey",
+				Key: &v1alpha1.KeyRef{
+					SecretRef: &v1.SecretReference{
+						Name:      secret.Name,
+						Namespace: secret.Namespace,
+					},
+					HashAlgorithm: "sha256",
+				},
+			},
+		})
+
+	wrongKeyandPatternVp := getVerificationPolicy(
+		"wrongKeyInDataVp",
+		namespace,
+		[]v1alpha1.ResourcePattern{
+			{Pattern: "this should not match any resources"},
+		},
+		[]v1alpha1.Authority{
+			{
+				Name: "pubkey",
+				Key: &v1alpha1.KeyRef{
+					Data:          string(pub2),
+					HashAlgorithm: "sha256",
+				},
+			},
+		})
+
+	k8sclient := fakek8s.NewSimpleClientset(secret)
+
+	return sv, keys, k8sclient, []*v1alpha1.VerificationPolicy{&keyInDataVp, &keyInSecretVp, &wrongKeyandPatternVp}
+}
+
+// SetupMatchAllVerificationPolicies sets up a verification policy whose pattern matches all resources.
+// A SignerVerifier is returned to sign resources.
+// The k8s clientset is returned so the secret can be fetched from it.
+// The VerificationPolicies are returned so callers can fetch the public keys they reference.
+func SetupMatchAllVerificationPolicies(t *testing.T, namespace string) (signature.SignerVerifier, *fakek8s.Clientset, []*v1alpha1.VerificationPolicy) {
+	t.Helper()
+	sv, _, pub, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
+	if err != nil {
+		t.Fatalf("failed to generate keys %v", err)
+	}
+
+	secret := &v1.Secret{
+		Data: map[string][]byte{"cosign.pub": pub},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "verification-secrets",
+			Namespace: namespace}}
+
+	matchAllVp := getVerificationPolicy(
+		"matchAllVp",
+		namespace,
+		[]v1alpha1.ResourcePattern{
+			{Pattern: ".*"},
+		},
+		[]v1alpha1.Authority{
+			{
+				Name: "pubkey",
+				Key: &v1alpha1.KeyRef{
+					Data:          string(pub),
+					HashAlgorithm: "sha256",
+				},
+			},
+		})
+
+	k8sclient := fakek8s.NewSimpleClientset(secret)
+
+	return sv, k8sclient, []*v1alpha1.VerificationPolicy{&matchAllVp}
+}
+
+// GetSignerFromFile generates key files in a tmpdir and returns the signer and the public key path
+func GetSignerFromFile(ctx context.Context, t *testing.T) (signature.Signer, string) {
+	t.Helper()
+	sv, _, pub, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
+	if err != nil {
+		t.Fatal(err)
+	}
 	tmpDir := t.TempDir()
-	publicKeyFile := "ecdsa.pub"
-	sv, err := GenerateKeyFile(tmpDir, publicKeyFile)
+	pubKey := filepath.Join(tmpDir, "ecdsa.pub")
+	if err := os.WriteFile(pubKey, pub, 0600); err != nil {
+		t.Fatal(err)
+	}
+
+	return sv, pubKey
+}
+
+// GetKeysFromFile generates key files in a tmpdir and returns the private key and the public key path
+func GetKeysFromFile(ctx context.Context, t *testing.T) (*ecdsa.PrivateKey, string) {
+	t.Helper()
+	_, keys, pub, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
 	if err != nil {
 		t.Fatal(err)
 	}
+	tmpDir := t.TempDir()
+	pubKey := filepath.Join(tmpDir, "ecdsa.pub")
+	if err := os.WriteFile(pubKey, pub, 0600); err != nil {
+		t.Fatal(err)
+	}
 
-	return sv, filepath.Join(tmpDir, publicKeyFile), nil
+	return keys, pubKey
 }
 
-// GenerateKeyFile creates public key files, return the SignerVerifier
-func GenerateKeyFile(dir string, pubkeyfile string) (signature.SignerVerifier, error) {
-	keys, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+// GenerateKeys generates an ecdsa key pair and returns the SignerVerifier, the private key and the PEM-encoded public key
+func GenerateKeys(c elliptic.Curve, hashFunc crypto.Hash) (signature.SignerVerifier, *ecdsa.PrivateKey, []byte, error) {
+	keys, err := ecdsa.GenerateKey(c, rand.Reader)
 	if err != nil {
-		return nil, err
+		return nil, nil, nil, err
 	}
 
 	// Now do the public key
 	pubBytes, err := cryptoutils.MarshalPublicKeyToPEM(keys.Public())
 	if err != nil {
-		return nil, err
+		return nil, nil, nil, err
 	}
 
-	pubKey := filepath.Join(dir, pubkeyfile)
-	if err := os.WriteFile(pubKey, pubBytes, 0600); err != nil {
-		return nil, err
-	}
-
-	sv, err := signature.LoadSignerVerifier(keys, crypto.SHA256)
+	sv, err := signature.LoadSignerVerifier(keys, hashFunc)
 	if err != nil {
-		return nil, err
+		return nil, nil, nil, err
 	}
 
-	return sv, nil
+	return sv, keys, pubBytes, nil
 }
 
 // signInterface returns the encoded signature for the given object.
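Since GenerateKeys now returns everything in memory, a unit test can exercise a sign/verify round trip without touching disk. A minimal sketch (not part of the patch) of how these helpers might be consumed from the same `test` package; the test name and payload are hypothetical:

```go
package test

import (
	"bytes"
	"crypto"
	"crypto/elliptic"
	"testing"
)

// TestGenerateKeysRoundTrip is an illustrative, hypothetical unit test.
func TestGenerateKeysRoundTrip(t *testing.T) {
	// GenerateKeys returns an in-memory SignerVerifier, the private key and the
	// PEM-encoded public key; no files are written.
	sv, _, pubPEM, err := GenerateKeys(elliptic.P256(), crypto.SHA256)
	if err != nil {
		t.Fatal(err)
	}
	if len(pubPEM) == 0 {
		t.Fatal("expected a PEM-encoded public key")
	}

	// Sign and verify an arbitrary payload with the returned SignerVerifier.
	payload := []byte("tekton")
	sig, err := sv.SignMessage(bytes.NewReader(payload))
	if err != nil {
		t.Fatal(err)
	}
	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(payload)); err != nil {
		t.Fatalf("round-trip verification failed: %v", err)
	}

	// SetupVerificationPolicies wires the same kind of key material into policies:
	// vps[0] carries a key inline, vps[1] points at the Secret held by k8sclient,
	// and vps[2] holds a key and pattern that should never match.
	_, _, k8sclient, vps := SetupVerificationPolicies(t)
	if len(vps) != 3 || k8sclient == nil {
		t.Fatal("unexpected fixture shape")
	}
}
```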
@@ -218,7 +360,6 @@ func getPass(confirm bool) ([]byte, error) {
 	read := read(confirm)
 	return read()
 }
-
 func readPasswordFn(confirm bool) func() ([]byte, error) {
 	pw, ok := os.LookupEnv("PRIVATE_PASSWORD")
 	if ok {
@@ -230,3 +371,20 @@ func readPasswordFn(confirm bool) func() ([]byte, error) {
 		return nil, fmt.Errorf("fail to get password")
 	}
 }
+
+func getVerificationPolicy(name, namespace string, patterns []v1alpha1.ResourcePattern, authorities []v1alpha1.Authority) v1alpha1.VerificationPolicy {
+	return v1alpha1.VerificationPolicy{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "VerificationPolicy",
+			APIVersion: "v1alpha1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: v1alpha1.VerificationPolicySpec{
+			Resources:   patterns,
+			Authorities: authorities,
+		},
+	}
+}
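To round off, a minimal sketch (not part of the patch) of the fixture that `getVerificationPolicy` assembles via `SetupMatchAllVerificationPolicies`, paired with a signed resource; the test name is hypothetical and the actual verification call is left to whatever verifier consumes these policies:

```go
package test

import "testing"

// TestMatchAllPolicyFixture is an illustrative, hypothetical unit test.
func TestMatchAllPolicyFixture(t *testing.T) {
	sv, k8sclient, vps := SetupMatchAllVerificationPolicies(t, "trusted-resources")

	// Sign a Pipeline fixture with the same key the policy carries inline.
	signed, err := GetSignedPipeline(GetUnsignedPipeline("example-pipeline"), sv, "signed-example")
	if err != nil {
		t.Fatal(err)
	}
	if len(signed.Annotations) == 0 {
		t.Fatal("expected the signed pipeline to carry a signature annotation")
	}

	// vps[0] is the object getVerificationPolicy built: kind VerificationPolicy,
	// a spec.resources pattern of ".*" and a single authority with an inline key.
	if got := vps[0].Spec.Resources[0].Pattern; got != ".*" {
		t.Fatalf("unexpected pattern %q", got)
	}
	_ = k8sclient // would back a secretRef lookup if the policy used one
}
```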