diff --git a/acceptance/acceptance_test.go b/acceptance/acceptance_test.go index a1deec531..7b8ba9a19 100644 --- a/acceptance/acceptance_test.go +++ b/acceptance/acceptance_test.go @@ -23,7 +23,6 @@ const ( var ( latestPlatformAPI = api.Platform.Latest().String() buildDir string - cacheFixtureDir string ) func TestVersion(t *testing.T) { diff --git a/acceptance/analyzer_test.go b/acceptance/analyzer_test.go index 92e9d487f..23abb9e45 100644 --- a/acceptance/analyzer_test.go +++ b/acceptance/analyzer_test.go @@ -37,7 +37,6 @@ func TestAnalyzer(t *testing.T) { analyzeImage = analyzeTest.testImageRef analyzerPath = analyzeTest.containerBinaryPath - cacheFixtureDir = filepath.Join("testdata", "cache-dir") analyzeRegAuthConfig = analyzeTest.targetRegistry.authConfig analyzeRegNetwork = analyzeTest.targetRegistry.network analyzeDaemonFixtures = analyzeTest.targetDaemon.fixtures diff --git a/acceptance/creator_test.go b/acceptance/creator_test.go index 749ee4e25..ebd38788d 100644 --- a/acceptance/creator_test.go +++ b/acceptance/creator_test.go @@ -40,7 +40,6 @@ func TestCreator(t *testing.T) { createImage = createTest.testImageRef creatorPath = createTest.containerBinaryPath - cacheFixtureDir = filepath.Join("testdata", "creator", "cache-dir") createRegAuthConfig = createTest.targetRegistry.authConfig createRegNetwork = createTest.targetRegistry.network createDaemonFixtures = createTest.targetDaemon.fixtures diff --git a/acceptance/exporter_test.go b/acceptance/exporter_test.go index 493316579..c29a8e30c 100644 --- a/acceptance/exporter_test.go +++ b/acceptance/exporter_test.go @@ -5,8 +5,11 @@ package acceptance import ( "context" + "crypto/sha256" + "encoding/hex" "encoding/json" "fmt" + "io" "os" "os/exec" "path/filepath" @@ -18,6 +21,7 @@ import ( "github.com/buildpacks/imgutil" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/pkg/errors" "github.com/sclevine/spec" 
"github.com/sclevine/spec/report" @@ -25,6 +29,7 @@ import ( "github.com/buildpacks/lifecycle/auth" "github.com/buildpacks/lifecycle/cache" "github.com/buildpacks/lifecycle/cmd" + "github.com/buildpacks/lifecycle/internal/fsutil" "github.com/buildpacks/lifecycle/internal/path" "github.com/buildpacks/lifecycle/platform/files" h "github.com/buildpacks/lifecycle/testhelpers" @@ -51,7 +56,6 @@ func TestExporter(t *testing.T) { exportImage = exportTest.testImageRef exporterPath = exportTest.containerBinaryPath - cacheFixtureDir = filepath.Join("testdata", "exporter", "cache-dir") exportRegAuthConfig = exportTest.targetRegistry.authConfig exportRegNetwork = exportTest.targetRegistry.network exportDaemonFixtures = exportTest.targetDaemon.fixtures @@ -71,140 +75,138 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe }) when("daemon case", func() { - when("first build", func() { - when("app", func() { - it("is created", func() { - exportFlags := []string{"-daemon", "-log-level", "debug"} - exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) - exportedImageName = "some-exported-image-" + h.RandString(10) - exportArgs = append(exportArgs, exportedImageName) - - output := h.DockerRun(t, - exportImage, - h.WithFlags(append( - dockerSocketMount, - "--env", "CNB_PLATFORM_API="+platformAPI, - )...), - h.WithArgs(exportArgs...), - ) - h.AssertStringContains(t, output, "Saving "+exportedImageName) + it("app is created", func() { + exportFlags := []string{"-daemon", "-log-level", "debug"} + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
+ exportedImageName = "some-exported-image-" + h.RandString(10) + exportArgs = append(exportArgs, exportedImageName) + + output := h.DockerRun(t, + exportImage, + h.WithFlags(append( + dockerSocketMount, + "--env", "CNB_PLATFORM_API="+platformAPI, + )...), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, output, "Saving "+exportedImageName) + + if api.MustParse(platformAPI).AtLeast("0.11") { + extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"} + for _, extension := range extensions { + h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension))) + h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension))) + } + } else { + h.AssertStringDoesNotContain(t, output, "Copying SBOM") + } + + if api.MustParse(platformAPI).AtLeast("0.12") { + expectedHistory := []string{ + "Buildpacks Launcher Config", + "Buildpacks Application Launcher", + "Application Layer", + "Software Bill-of-Materials", + "Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1", + "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1", + "", // run image layer + } + assertDaemonImageHasHistory(t, exportedImageName, expectedHistory) + } else { + assertDaemonImageDoesNotHaveHistory(t, exportedImageName) + } - if api.MustParse(platformAPI).AtLeast("0.11") { - extensions := []string{"sbom.cdx.json", "sbom.spdx.json", "sbom.syft.json"} - for _, extension := range extensions { - h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM lifecycle.%s to %s", extension, filepath.Join(path.RootDir, "layers", "sbom", "build", "buildpacksio_lifecycle", extension))) - h.AssertStringContains(t, output, fmt.Sprintf("Copying SBOM launcher.%s to %s", extension, 
filepath.Join(path.RootDir, "layers", "sbom", "launch", "buildpacksio_lifecycle", "launcher", extension))) - } - } else { - h.AssertStringDoesNotContain(t, output, "Copying SBOM") - } - - if api.MustParse(platformAPI).AtLeast("0.12") { - expectedHistory := []string{ - "Buildpacks Launcher Config", - "Buildpacks Application Launcher", - "Application Layer", - "Software Bill-of-Materials", - "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1", - "", // run image layer - } - assertDaemonImageHasHistory(t, exportedImageName, expectedHistory) - } else { - assertDaemonImageDoesNotHaveHistory(t, exportedImageName) - } + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + }) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) - }) + when("using extensions", func() { + it.Before(func() { + h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") }) - when("using extensions", func() { - it.Before(func() { - h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") - }) - - it("is created from the extended run image", func() { - exportFlags := []string{ - "-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag - "-daemon", - "-extended", "/layers/some-extended-dir", - "-log-level", "debug", - "-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag - } - exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
- exportedImageName = "some-exported-image-" + h.RandString(10) - exportArgs = append(exportArgs, exportedImageName) + it("app is created from the extended run image", func() { + exportFlags := []string{ + "-analyzed", "/layers/run-image-extended-analyzed.toml", // though the run image is a registry image, it also exists in the daemon with the same tag + "-daemon", + "-extended", "/layers/some-extended-dir", + "-log-level", "debug", + "-run", "/cnb/run.toml", // though the run image is a registry image, it also exists in the daemon with the same tag + } + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) + exportedImageName = "some-exported-image-" + h.RandString(10) + exportArgs = append(exportArgs, exportedImageName) - // get run image top layer - inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage) - h.AssertNil(t, err) - layers := inspect.RootFS.Layers - runImageFixtureTopLayerSHA := layers[len(layers)-1] - runImageFixtureSHA := inspect.ID + // get run image top layer + inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportTest.targetRegistry.fixtures.ReadOnlyRunImage) + h.AssertNil(t, err) + layers := inspect.RootFS.Layers + runImageFixtureTopLayerSHA := layers[len(layers)-1] + runImageFixtureSHA := inspect.ID - experimentalMode := "warn" - if api.MustParse(platformAPI).AtLeast("0.13") { - experimentalMode = "error" - } + experimentalMode := "warn" + if api.MustParse(platformAPI).AtLeast("0.13") { + experimentalMode = "error" + } - output := h.DockerRun(t, - exportImage, - h.WithFlags(append( - dockerSocketMount, - "--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode, - "--env", "CNB_PLATFORM_API="+platformAPI, - )...), - h.WithArgs(exportArgs...), - ) - h.AssertStringContains(t, output, "Saving "+exportedImageName) + output := h.DockerRun(t, + exportImage, + h.WithFlags(append( + dockerSocketMount, + "--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode, 
+ "--env", "CNB_PLATFORM_API="+platformAPI, + )...), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, output, "Saving "+exportedImageName) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) - expectedHistory := []string{ - "Buildpacks Launcher Config", - "Buildpacks Application Launcher", - "Application Layer", - "Software Bill-of-Materials", - "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1", - "Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension", - "Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension", - "", // run image layer - } - assertDaemonImageHasHistory(t, exportedImageName, expectedHistory) - t.Log("bases the exported image on the extended run image") - inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName) - h.AssertNil(t, err) - h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/ - t.Log("Adds extension layers") - type testCase struct { - expectedDiffID string - layerIndex int - } - testCases := []testCase{ - { - expectedDiffID: "sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps - layerIndex: 1, - }, - { - expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps - layerIndex: 2, - }, - } - for _, tc := 
range testCases { - h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID) - } - t.Log("sets the layers metadata label according to the new spec") - var lmd files.LayersMetadata - lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"] - h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) - h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml - h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml - h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA) - h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:")) - }) + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + expectedHistory := []string{ + "Buildpacks Launcher Config", + "Buildpacks Application Launcher", + "Application Layer", + "Software Bill-of-Materials", + "Layer: 'corrupted-layer', Created by buildpack: corrupted_buildpack@corrupted_v1", + "Layer: 'launch-layer', Created by buildpack: cacher_buildpack@cacher_v1", + "Layer: 'RUN mkdir /some-other-dir && echo some-data > /some-other-dir/some-file && echo some-data > /some-other-file', Created by extension: second-extension", + "Layer: 'RUN mkdir /some-dir && echo some-data > /some-dir/some-file && echo some-data > /some-file', Created by extension: first-extension", + "", // run image layer + } + assertDaemonImageHasHistory(t, exportedImageName, expectedHistory) + t.Log("bases the exported image on the extended run image") + inspect, _, err = h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName) + h.AssertNil(t, err) + h.AssertEq(t, inspect.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/ + t.Log("Adds extension layers") + type testCase struct { + expectedDiffID string + layerIndex int + } + testCases := []testCase{ + { + expectedDiffID: 
"sha256:fb54d2566824d6630d94db0b008d9a544a94d3547a424f52e2fd282b648c0601", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing and zeroing timestamps + layerIndex: 1, + }, + { + expectedDiffID: "sha256:1018c7d3584c4f7fa3ef4486d1a6a11b93956b9d8bfe0898a3e0fbd248c984d8", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing and zeroing timestamps + layerIndex: 2, + }, + } + for _, tc := range testCases { + h.AssertEq(t, inspect.RootFS.Layers[tc.layerIndex], tc.expectedDiffID) + } + t.Log("sets the layers metadata label according to the new spec") + var lmd files.LayersMetadata + lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"] + h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) + h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml + h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml + h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA) + h.AssertEq(t, lmd.RunImage.Reference, strings.TrimPrefix(runImageFixtureSHA, "sha256:")) }) }) when("SOURCE_DATE_EPOCH is set", func() { - it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() { + it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() { h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9") expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC) exportFlags := []string{"-daemon"} @@ -231,63 +233,87 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe }) when("registry case", func() { - when("first build", func() { - when("app", func() { - it("is created", func() { - var exportFlags []string - exportArgs := append([]string{ctrPath(exporterPath)}, 
exportFlags...) - exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) - exportArgs = append(exportArgs, exportedImageName) + it("app is created", func() { + var exportFlags []string + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) + exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) + exportArgs = append(exportArgs, exportedImageName) + + output := h.DockerRun(t, + exportImage, + h.WithFlags( + "--env", "CNB_PLATFORM_API="+platformAPI, + "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, + "--network", exportRegNetwork, + ), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, output, "Saving "+exportedImageName) + + h.Run(t, exec.Command("docker", "pull", exportedImageName)) + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + }) - output := h.DockerRun(t, - exportImage, - h.WithFlags( - "--env", "CNB_PLATFORM_API="+platformAPI, - "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, - "--network", exportRegNetwork, - ), - h.WithArgs(exportArgs...), - ) - h.AssertStringContains(t, output, "Saving "+exportedImageName) + when("registry is insecure", func() { + it.Before(func() { + h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") + }) - h.Run(t, exec.Command("docker", "pull", exportedImageName)) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) - }) + it("uses http protocol", func() { + var exportFlags []string + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
+ exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10)) + exportArgs = append(exportArgs, exportedImageName) + insecureRegistry := "host.docker.internal/bar" + insecureAnalyzed := "/layers/analyzed_insecure.toml" + + _, _, err := h.DockerRunWithError(t, + exportImage, + h.WithFlags( + "--env", "CNB_PLATFORM_API="+platformAPI, + "--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry, + "--env", "CNB_ANALYZED_PATH="+insecureAnalyzed, + "--network", exportRegNetwork, + ), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, err.Error(), "http://host.docker.internal") }) + }) - when("app using insecure registry", func() { - it.Before(func() { - h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") - }) + when("SOURCE_DATE_EPOCH is set", func() { + it("app is created with config CreatedAt set to SOURCE_DATE_EPOCH", func() { + h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9") + expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC) - it("does an http request", func() { - var exportFlags []string - exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) - exportedImageName = exportTest.RegRepoName("some-insecure-exported-image-" + h.RandString(10)) - exportArgs = append(exportArgs, exportedImageName) - insecureRegistry := "host.docker.internal/bar" - insecureAnalyzed := "/layers/analyzed_insecure.toml" + var exportFlags []string + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
+ exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) + exportArgs = append(exportArgs, exportedImageName) - _, _, err := h.DockerRunWithError(t, - exportImage, - h.WithFlags( - "--env", "CNB_PLATFORM_API="+platformAPI, - "--env", "CNB_INSECURE_REGISTRIES="+insecureRegistry, - "--env", "CNB_ANALYZED_PATH="+insecureAnalyzed, - "--network", exportRegNetwork, - ), - h.WithArgs(exportArgs...), - ) - h.AssertStringContains(t, err.Error(), "http://host.docker.internal") - }) - }) + output := h.DockerRun(t, + exportImage, + h.WithFlags( + "--env", "CNB_PLATFORM_API="+platformAPI, + "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, + "--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()), + "--network", exportRegNetwork, + ), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, output, "Saving "+exportedImageName) - when("SOURCE_DATE_EPOCH is set", func() { - it("Image CreatedAt is set to SOURCE_DATE_EPOCH", func() { - h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.9"), "SOURCE_DATE_EPOCH support added in 0.9") - expectedTime := time.Date(2022, 1, 5, 5, 5, 5, 0, time.UTC) + h.Run(t, exec.Command("docker", "pull", exportedImageName)) + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime) + }) + }) - var exportFlags []string + // FIXME: move this out of the registry block + when("cache", func() { + when("image case", func() { + it("cache is created", func() { + cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10)) + exportFlags := []string{"-cache-image", cacheImageName} exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) exportArgs = append(exportArgs, exportedImageName) @@ -297,23 +323,21 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe h.WithFlags( "--env", "CNB_PLATFORM_API="+platformAPI, "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, - "--env", "SOURCE_DATE_EPOCH="+fmt.Sprintf("%d", expectedTime.Unix()), "--network", exportRegNetwork, ), h.WithArgs(exportArgs...), ) h.AssertStringContains(t, output, "Saving "+exportedImageName) - + // To detect whether the export of cacheImage and exportedImage is successful h.Run(t, exec.Command("docker", "pull", exportedImageName)) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, expectedTime) + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + h.Run(t, exec.Command("docker", "pull", cacheImageName)) }) - }) - when("cache", func() { - when("cache image case", func() { - it("is created", func() { + when("parallel export is enabled", func() { + it("cache is created", func() { cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10)) - exportFlags := []string{"-cache-image", cacheImageName} + exportFlags := []string{"-cache-image", cacheImageName, "-parallel"} exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) exportArgs = append(exportArgs, exportedImageName) @@ -328,15 +352,17 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe h.WithArgs(exportArgs...), ) h.AssertStringContains(t, output, "Saving "+exportedImageName) - // To detect whether the export of cacheImage and exportedImage is successful + h.Run(t, exec.Command("docker", "pull", exportedImageName)) assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) h.Run(t, exec.Command("docker", "pull", cacheImageName)) }) + }) - it("is created with parallel export enabled", func() { - cacheImageName := exportTest.RegRepoName("some-cache-image-" + h.RandString(10)) - exportFlags := []string{"-cache-image", cacheImageName, "-parallel"} + when("cache is provided but no data was cached", func() { + it("cache is created with an empty layer", func() { + cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10)) + exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"} exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) exportArgs = append(exportArgs, exportedImageName) @@ -352,14 +378,52 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe ) h.AssertStringContains(t, output, "Saving "+exportedImageName) - h.Run(t, exec.Command("docker", "pull", exportedImageName)) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + testEmptyLayerSHA := calculateEmptyLayerSha(t) + + // Retrieve the cache image from the ephemeral registry h.Run(t, exec.Command("docker", "pull", cacheImageName)) + logger := cmd.DefaultLogger + + subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, api.MustParse(platformAPI).LessThan("0.13"))) + h.AssertNil(t, err) + + //Assert the cache image was created with an empty layer + layer, err := subject.RetrieveLayer(testEmptyLayerSHA) + h.AssertNil(t, err) + defer layer.Close() }) + }) + }) - it("is created with empty layer", func() { - cacheImageName := exportTest.RegRepoName("some-empty-cache-image-" + h.RandString(10)) - exportFlags := []string{"-cache-image", cacheImageName, "-layers", "/other_layers"} + when("directory case", func() { + when("original cache was corrupted", func() { + var cacheDir string + + it.Before(func() { + var err error + cacheDir, err = os.MkdirTemp("", "cache") + h.AssertNil(t, err) + h.AssertNil(t, os.Chmod(cacheDir, 0777)) // Override umask + + cacheFixtureDir := filepath.Join("testdata", "exporter", "cache-dir") + h.AssertNil(t, fsutil.Copy(cacheFixtureDir, cacheDir)) + // We have to pre-create the tar files so that their digests do not change due to timestamps + // But, ':' in the filepath on Windows is not allowed + h.AssertNil(t, os.Rename( + filepath.Join(cacheDir, "committed", "sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"), + filepath.Join(cacheDir, 
"committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar"), + )) + }) + + it.After(func() { + _ = os.RemoveAll(cacheDir) + }) + + it("overwrites the original layer", func() { + exportFlags := []string{ + "-cache-dir", "/cache", + "-log-level", "debug", + } exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) exportArgs = append(exportArgs, exportedImageName) @@ -370,115 +434,131 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe "--env", "CNB_PLATFORM_API="+platformAPI, "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, "--network", exportRegNetwork, + "--volume", fmt.Sprintf("%s:/cache", cacheDir), ), h.WithArgs(exportArgs...), ) + h.AssertStringContains(t, output, "Skipping reuse for layer corrupted_buildpack:corrupted-layer: expected layer contents to have SHA 'sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59'; found 'sha256:9e0b77ed599eafdab8611f7eeefef084077f91f02f1da0a3870c7ff20a08bee8'") h.AssertStringContains(t, output, "Saving "+exportedImageName) - - testEmptyLayerSHA := calculateEmptyLayerSha(t) - - // Retrieve the cache image from the ephemeral registry - h.Run(t, exec.Command("docker", "pull", cacheImageName)) - logger := cmd.DefaultLogger - - subject, err := cache.NewImageCacheFromName(cacheImageName, authn.DefaultKeychain, logger, cache.NewImageDeleter(cache.NewImageComparer(), logger, api.MustParse(platformAPI).LessThan("0.13"))) + h.Run(t, exec.Command("docker", "pull", exportedImageName)) + defer h.Run(t, exec.Command("docker", "image", "rm", exportedImageName)) + // Verify the app has the correct sha for the layer + inspect, _, err := h.DockerCli(t).ImageInspectWithRaw(context.TODO(), exportedImageName) h.AssertNil(t, err) - - //Assert the cache image was created with an empty layer - layer, err := subject.RetrieveLayer(testEmptyLayerSHA) + var lmd 
files.LayersMetadata + lmdJSON := inspect.Config.Labels["io.buildpacks.lifecycle.metadata"] + h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) + h.AssertEq(t, lmd.Buildpacks[2].Layers["corrupted-layer"].SHA, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59") + // Verify the cache has correct contents now + foundDiffID, err := func() (string, error) { + layerPath := filepath.Join(cacheDir, "committed", "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar") + layerRC, err := os.Open(layerPath) + if err != nil { + return "", err + } + defer func() { + _ = layerRC.Close() + }() + hasher := sha256.New() + if _, err = io.Copy(hasher, layerRC); err != nil { + return "", errors.Wrap(err, "hashing layer") + } + foundDiffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) + return foundDiffID, nil + }() h.AssertNil(t, err) - defer layer.Close() + h.AssertEq(t, foundDiffID, "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59") }) }) }) + }) - when("using extensions", func() { - it.Before(func() { - h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") - }) + when("using extensions", func() { + it.Before(func() { + h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "") + }) - it("is created from the extended run image", func() { - exportFlags := []string{ - "-analyzed", "/layers/run-image-extended-analyzed.toml", - "-extended", "/layers/some-extended-dir", - "-log-level", "debug", - "-run", "/cnb/run.toml", - } - exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
- exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) - exportArgs = append(exportArgs, exportedImageName) + it("app is created from the extended run image", func() { + exportFlags := []string{ + "-analyzed", "/layers/run-image-extended-analyzed.toml", + "-extended", "/layers/some-extended-dir", + "-log-level", "debug", + "-run", "/cnb/run.toml", + } + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) + exportedImageName = exportTest.RegRepoName("some-exported-image-" + h.RandString(10)) + exportArgs = append(exportArgs, exportedImageName) - // get run image SHA & top layer - ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) - h.AssertNil(t, err) - remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth)) - h.AssertNil(t, err) - layers, err := remoteImage.Layers() - h.AssertNil(t, err) - runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID() - h.AssertNil(t, err) - runImageFixtureSHA, err := remoteImage.Digest() - h.AssertNil(t, err) + // get run image SHA & top layer + ref, imageAuth, err := auth.ReferenceForRepoName(authn.DefaultKeychain, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) + h.AssertNil(t, err) + remoteImage, err := remote.Image(ref, remote.WithAuth(imageAuth)) + h.AssertNil(t, err) + layers, err := remoteImage.Layers() + h.AssertNil(t, err) + runImageFixtureTopLayerSHA, err := layers[len(layers)-1].DiffID() + h.AssertNil(t, err) + runImageFixtureSHA, err := remoteImage.Digest() + h.AssertNil(t, err) - experimentalMode := "warn" - if api.MustParse(platformAPI).AtLeast("0.13") { - experimentalMode = "error" - } + experimentalMode := "warn" + if api.MustParse(platformAPI).AtLeast("0.13") { + experimentalMode = "error" + } - output := h.DockerRun(t, - exportImage, - h.WithFlags( - "--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode, - "--env", "CNB_PLATFORM_API="+platformAPI, - "--env", 
"CNB_REGISTRY_AUTH="+exportRegAuthConfig, - "--network", exportRegNetwork, - ), - h.WithArgs(exportArgs...), - ) - h.AssertStringContains(t, output, "Saving "+exportedImageName) + output := h.DockerRun(t, + exportImage, + h.WithFlags( + "--env", "CNB_EXPERIMENTAL_MODE="+experimentalMode, + "--env", "CNB_PLATFORM_API="+platformAPI, + "--env", "CNB_REGISTRY_AUTH="+exportRegAuthConfig, + "--network", exportRegNetwork, + ), + h.WithArgs(exportArgs...), + ) + h.AssertStringContains(t, output, "Saving "+exportedImageName) - h.Run(t, exec.Command("docker", "pull", exportedImageName)) - assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) - t.Log("bases the exported image on the extended run image") - ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName) - h.AssertNil(t, err) - remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth)) - h.AssertNil(t, err) - configFile, err := remoteImage.ConfigFile() - h.AssertNil(t, err) - h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/ - t.Log("Adds extension layers") - layers, err = remoteImage.Layers() + h.Run(t, exec.Command("docker", "pull", exportedImageName)) + assertImageOSAndArchAndCreatedAt(t, exportedImageName, exportTest, imgutil.NormalizedDateTime) + t.Log("bases the exported image on the extended run image") + ref, imageAuth, err = auth.ReferenceForRepoName(authn.DefaultKeychain, exportedImageName) + h.AssertNil(t, err) + remoteImage, err = remote.Image(ref, remote.WithAuth(imageAuth)) + h.AssertNil(t, err) + configFile, err := remoteImage.ConfigFile() + h.AssertNil(t, err) + h.AssertEq(t, configFile.Config.Labels["io.buildpacks.rebasable"], "false") // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/ + t.Log("Adds extension layers") + layers, err = remoteImage.Layers() + h.AssertNil(t, err) + 
type testCase struct { + expectedDigest string + layerIndex int + } + testCases := []testCase{ + { + expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing + layerIndex: 1, + }, + { + expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing + layerIndex: 2, + }, + } + for _, tc := range testCases { + layer := layers[tc.layerIndex] + digest, err := layer.Digest() h.AssertNil(t, err) - type testCase struct { - expectedDigest string - layerIndex int - } - testCases := []testCase{ - { - expectedDigest: "sha256:08e7ad5ce17cf5e5f70affe68b341a93de86ee2ba074932c3a05b8770f66d772", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/65c2873d397056a5cb4169790654d787579b005f18b903082b177d4d9b4aecf5 after un-compressing, zeroing timestamps, and re-compressing - layerIndex: 1, - }, - { - expectedDigest: "sha256:0e74ef444ea437147e3fa0ce2aad371df5380c26b96875ae07b9b67f44cdb2ee", // from testdata/exporter/container/layers/some-extended-dir/run/sha256_/blobs/sha256/0fb9b88c9cbe9f11b4c8da645f390df59f5949632985a0bfc2a842ef17b2ad18 after un-compressing, zeroing timestamps, and re-compressing - layerIndex: 2, - }, - } - for _, tc := range testCases { - layer := layers[tc.layerIndex] - digest, err := layer.Digest() - h.AssertNil(t, err) - h.AssertEq(t, digest.String(), tc.expectedDigest) - } - t.Log("sets the layers metadata label according to the new spec") - var lmd files.LayersMetadata - lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"] - 
h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) - h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml - h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml - h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String()) - h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String())) - }) + h.AssertEq(t, digest.String(), tc.expectedDigest) + } + t.Log("sets the layers metadata label according to the new spec") + var lmd files.LayersMetadata + lmdJSON := configFile.Config.Labels["io.buildpacks.lifecycle.metadata"] + h.AssertNil(t, json.Unmarshal([]byte(lmdJSON), &lmd)) + h.AssertEq(t, lmd.RunImage.Image, exportTest.targetRegistry.fixtures.ReadOnlyRunImage) // from analyzed.toml + h.AssertEq(t, lmd.RunImage.Mirrors, []string{"mirror1", "mirror2"}) // from run.toml + h.AssertEq(t, lmd.RunImage.TopLayer, runImageFixtureTopLayerSHA.String()) + h.AssertEq(t, lmd.RunImage.Reference, fmt.Sprintf("%s@%s", exportTest.targetRegistry.fixtures.ReadOnlyRunImage, runImageFixtureSHA.String())) }) }) }) @@ -493,7 +573,7 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe when("experimental mode is enabled", func() { it.Before(func() { - // creates the directory to save all the OCI images on disk + // create the directory to save all OCI images on disk tmpDir, err = os.MkdirTemp("", "layout") h.AssertNil(t, err) @@ -508,35 +588,31 @@ func testExporterFunc(platformAPI string) func(t *testing.T, when spec.G, it spe os.RemoveAll(tmpDir) }) - when("custom layout directory", func() { - when("first build", func() { - when("app", func() { - it.Before(func() { - exportedImageName = "my-custom-layout-app" - layoutDir = filepath.Join(path.RootDir, "my-layout-dir") - }) - - it("is created", func() { - var exportFlags []string - h.SkipIf(t, 
api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag") - exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...) - exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) - exportArgs = append(exportArgs, exportedImageName) - - output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage, - h.WithFlags( - "--env", "CNB_EXPERIMENTAL_MODE=warn", - "--env", "CNB_PLATFORM_API="+platformAPI, - ), - h.WithArgs(exportArgs...)) - - h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest") - - // assert the image was saved on disk in OCI layout format - index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest")) - h.AssertEq(t, len(index.Manifests), 1) - }) - }) + when("using a custom layout directory", func() { + it.Before(func() { + exportedImageName = "my-custom-layout-app" + layoutDir = filepath.Join(path.RootDir, "my-layout-dir") + }) + + it("app is created", func() { + var exportFlags []string + h.SkipIf(t, api.MustParse(platformAPI).LessThan("0.12"), "Platform API < 0.12 does not accept a -layout flag") + exportFlags = append(exportFlags, []string{"-layout", "-layout-dir", layoutDir, "-analyzed", "/layers/layout-analyzed.toml"}...) + exportArgs := append([]string{ctrPath(exporterPath)}, exportFlags...) 
+ exportArgs = append(exportArgs, exportedImageName) + + output := h.DockerRunAndCopy(t, containerName, tmpDir, layoutDir, exportImage, + h.WithFlags( + "--env", "CNB_EXPERIMENTAL_MODE=warn", + "--env", "CNB_PLATFORM_API="+platformAPI, + ), + h.WithArgs(exportArgs...)) + + h.AssertStringContains(t, output, "Saving /my-layout-dir/index.docker.io/library/my-custom-layout-app/latest") + + // assert the image was saved on disk in OCI layout format + index := h.ReadIndexManifest(t, filepath.Join(tmpDir, layoutDir, "index.docker.io", "library", exportedImageName, "latest")) + h.AssertEq(t, len(index.Manifests), 1) }) }) }) diff --git a/acceptance/restorer_test.go b/acceptance/restorer_test.go index 02f4b471d..2e6b14561 100644 --- a/acceptance/restorer_test.go +++ b/acceptance/restorer_test.go @@ -165,7 +165,7 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe h.AssertPathDoesNotExist(t, uncachedFile) }) - it("does not restore unused buildpack layer data", func() { + it("does not restore layer data from unused buildpacks", func() { h.DockerRunAndCopy(t, containerName, copyDir, @@ -179,6 +179,21 @@ func testRestorerFunc(platformAPI string) func(t *testing.T, when spec.G, it spe unusedBpLayer := filepath.Join(copyDir, "layers", "unused_buildpack") h.AssertPathDoesNotExist(t, unusedBpLayer) }) + + it("does not restore corrupted layer data", func() { + h.DockerRunAndCopy(t, + containerName, + copyDir, + "/layers", + restoreImage, + h.WithFlags("--env", "CNB_PLATFORM_API="+platformAPI), + h.WithArgs("-cache-dir", "/cache"), + ) + + // check corrupted layer is not restored + corruptedFile := filepath.Join(copyDir, "layers", "corrupted_buildpack", "corrupted-layer") + h.AssertPathDoesNotExist(t, corruptedFile) + }) }) }) diff --git a/acceptance/testdata/exporter/cache-dir/committed/io.buildpacks.lifecycle.cache.metadata b/acceptance/testdata/exporter/cache-dir/committed/io.buildpacks.lifecycle.cache.metadata new file mode 100644 index 
000000000..7046df52d --- /dev/null +++ b/acceptance/testdata/exporter/cache-dir/committed/io.buildpacks.lifecycle.cache.metadata @@ -0,0 +1,17 @@ +{ + "buildpacks": [ + { + "key": "corrupted_buildpack", + "version": "corrupted_v1", + "layers": { + "corrupted-layer": { + "sha": "sha256:258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59", + "data": null, + "build": false, + "launch": true, + "cache": true + } + } + } + ] +} diff --git a/acceptance/testdata/exporter/cache-dir/committed/sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar b/acceptance/testdata/exporter/cache-dir/committed/sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar new file mode 100644 index 000000000..91ae6ae71 Binary files /dev/null and b/acceptance/testdata/exporter/cache-dir/committed/sha256_258dfa0cc987efebc17559694866ebc91139e7c0e574f60d1d4092f53d7dff59.tar differ diff --git a/acceptance/testdata/exporter/container/layers/cacher_buildpack/cached-layer.sha b/acceptance/testdata/exporter/container/layers/cacher_buildpack/cached-layer.sha index 2cac9ac86..8515d4f61 100755 --- a/acceptance/testdata/exporter/container/layers/cacher_buildpack/cached-layer.sha +++ b/acceptance/testdata/exporter/container/layers/cacher_buildpack/cached-layer.sha @@ -1 +1 @@ -sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494 \ No newline at end of file +sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944 \ No newline at end of file diff --git a/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer.toml b/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer.toml new file mode 100644 index 000000000..3111a78ed --- /dev/null +++ b/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer.toml @@ -0,0 +1,3 @@ +[types] + cache = true + launch = true diff --git 
a/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer/data b/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer/data new file mode 100644 index 000000000..65a7bfba0 --- /dev/null +++ b/acceptance/testdata/exporter/container/layers/corrupted_buildpack/corrupted-layer/data @@ -0,0 +1 @@ +digest-not-match-data diff --git a/acceptance/testdata/exporter/container/layers/group.toml b/acceptance/testdata/exporter/container/layers/group.toml index cd879016d..a9a885792 100644 --- a/acceptance/testdata/exporter/container/layers/group.toml +++ b/acceptance/testdata/exporter/container/layers/group.toml @@ -7,3 +7,8 @@ id = "cacher_buildpack" version = "cacher_v1" api = "0.8" + +[[group]] + id = "corrupted_buildpack" + version = "corrupted_v1" + api = "0.8" diff --git a/acceptance/testdata/restorer/Dockerfile b/acceptance/testdata/restorer/Dockerfile index 85c85e8d9..a176ba1b7 100644 --- a/acceptance/testdata/restorer/Dockerfile +++ b/acceptance/testdata/restorer/Dockerfile @@ -8,10 +8,10 @@ ENV CNB_GROUP_ID=${cnb_gid} COPY ./container/ / -# turn /to_cache/ directories into cache tarballs -# these are referenced by sha in /cache/committed/io.buildpacks.lifecycle.cache.metadata -RUN tar cvf /cache/committed/sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494.tar -C /to_cache/cacher_buildpack layers -RUN tar cvf /cache/committed/sha256:58bafa1e79c8e44151141c95086beb37ca85b69578fc890bce33bb4c6c8e851f.tar -C /to_cache/unused_buildpack layers +# We have to pre-create the tar files so that their digests do not change due to timestamps +# But, ':' in the filepath on Windows is not allowed +RUN mv /cache/committed/sha256_2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar /cache/committed/sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar +RUN mv /cache/committed/sha256_430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar 
/cache/committed/sha256:430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar ENTRYPOINT ["/cnb/lifecycle/restorer"] diff --git a/acceptance/testdata/restorer/container/cache/committed/io.buildpacks.lifecycle.cache.metadata b/acceptance/testdata/restorer/container/cache/committed/io.buildpacks.lifecycle.cache.metadata index 21d99f74e..51c7edd77 100644 --- a/acceptance/testdata/restorer/container/cache/committed/io.buildpacks.lifecycle.cache.metadata +++ b/acceptance/testdata/restorer/container/cache/committed/io.buildpacks.lifecycle.cache.metadata @@ -1 +1,43 @@ -{"buildpacks":[{"key":"cacher_buildpack","version":"cacher_v1","layers":{"cached-layer":{"sha":"sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494","data":null,"build":false,"launch":false,"cache":true}}},{"key":"unused_buildpack","version":"v1","layers":{"cached-layer":{"sha":"sha256:58bafa1e79c8e44151141c95086beb37ca85b69578fc890bce33bb4c6c8e851f","data":null,"build":false,"launch":false,"cache":true}}}]} +{ + "buildpacks": [ + { + "key": "cacher_buildpack", + "version": "cacher_v1", + "layers": { + "cached-layer": { + "sha": "sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944", + "data": null, + "build": false, + "launch": false, + "cache": true + } + } + }, + { + "key": "corrupted_buildpack", + "version": "corrupted_v1", + "layers": { + "corrupted-layer": { + "sha": "sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c", + "data": null, + "build": false, + "launch": false, + "cache": true + } + } + }, + { + "key": "unused_buildpack", + "version": "v1", + "layers": { + "cached-layer": { + "sha": "sha256:430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78", + "data": null, + "build": false, + "launch": false, + "cache": true + } + } + } + ] +} diff --git a/acceptance/testdata/restorer/container/cache/committed/sha256_2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar 
b/acceptance/testdata/restorer/container/cache/committed/sha256_2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar new file mode 100644 index 000000000..2dae6cc55 Binary files /dev/null and b/acceptance/testdata/restorer/container/cache/committed/sha256_2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944.tar differ diff --git a/acceptance/testdata/restorer/container/cache/committed/sha256_430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar b/acceptance/testdata/restorer/container/cache/committed/sha256_430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar new file mode 100644 index 000000000..af9874ef3 Binary files /dev/null and b/acceptance/testdata/restorer/container/cache/committed/sha256_430338f576c11e5236669f9c843599d96afe28784cffcb2d46ddb07beb00df78.tar differ diff --git a/acceptance/testdata/restorer/container/layers/cacher_buildpack/cached-layer.sha b/acceptance/testdata/restorer/container/layers/cacher_buildpack/cached-layer.sha index 2cac9ac86..8515d4f61 100755 --- a/acceptance/testdata/restorer/container/layers/cacher_buildpack/cached-layer.sha +++ b/acceptance/testdata/restorer/container/layers/cacher_buildpack/cached-layer.sha @@ -1 +1 @@ -sha256:b89860e2f9c62e6b5d66d3ce019e18cdabae30273c25150b7f20a82f7a70e494 \ No newline at end of file +sha256:2d9c9c638d5c4f0df067eeae7b9c99ad05776a89d19ab863c28850a91e5f2944 \ No newline at end of file diff --git a/acceptance/testdata/restorer/container/layers/group.toml b/acceptance/testdata/restorer/container/layers/group.toml index f45252f71..a7f9116b2 100644 --- a/acceptance/testdata/restorer/container/layers/group.toml +++ b/acceptance/testdata/restorer/container/layers/group.toml @@ -8,6 +8,11 @@ version = "cacher_v1" api = "0.10" +[[group]] + id = "corrupted_buildpack" + version = "corrupted_v1" + api = "0.11" + [[group-extensions]] id = "some-extension-id" version = "v1" diff --git 
a/acceptance/testdata/restorer/container/to_cache/corrupted_buildpack/layers/corrupted_buildpack/corrupted-layer/data b/acceptance/testdata/restorer/container/to_cache/corrupted_buildpack/layers/corrupted_buildpack/corrupted-layer/data new file mode 100644 index 000000000..65a7bfba0 --- /dev/null +++ b/acceptance/testdata/restorer/container/to_cache/corrupted_buildpack/layers/corrupted_buildpack/corrupted-layer/data @@ -0,0 +1 @@ +digest-not-match-data diff --git a/cache/image_cache.go b/cache/image_cache.go index a029c87dc..012219ec9 100644 --- a/cache/image_cache.go +++ b/cache/image_cache.go @@ -158,3 +158,9 @@ func (c *ImageCache) Commit() error { return nil } + +// VerifyLayer returns an error if the layer contents do not match the provided sha. +func (c *ImageCache) VerifyLayer(_ string) error { + // we assume the registry is verifying digests for us + return nil +} diff --git a/cache/volume_cache.go b/cache/volume_cache.go index 3ec0003cc..22ef827d2 100644 --- a/cache/volume_cache.go +++ b/cache/volume_cache.go @@ -1,6 +1,7 @@ package cache import ( + "crypto/sha256" "encoding/json" "fmt" "io" @@ -143,11 +144,8 @@ func (c *VolumeCache) ReuseLayer(diffID string) error { stagingPath := diffIDPath(c.stagingDir, diffID) if _, err := os.Stat(committedPath); err != nil { - if os.IsNotExist(err) { - return NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID)) - } - if os.IsPermission(err) { - return NewReadErr(fmt.Sprintf("failed to read cache layer with SHA '%s' due to insufficient permissions", diffID)) + if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) { + return err } return fmt.Errorf("failed to re-use cache layer with SHA '%s': %w", diffID, err) } @@ -165,11 +163,8 @@ func (c *VolumeCache) RetrieveLayer(diffID string) (io.ReadCloser, error) { } file, err := os.Open(path) if err != nil { - if os.IsPermission(err) { - return nil, NewReadErr(fmt.Sprintf("failed to read cache layer with SHA '%s' due to insufficient 
permissions", diffID)) - } - if os.IsNotExist(err) { - return nil, NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID)) + if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) { + return nil, err } return nil, fmt.Errorf("failed to get cache layer with SHA '%s'", diffID) } @@ -189,8 +184,8 @@ func (c *VolumeCache) HasLayer(diffID string) (bool, error) { func (c *VolumeCache) RetrieveLayerFile(diffID string) (string, error) { path := diffIDPath(c.committedDir, diffID) if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - return "", NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID)) + if err = handleFileError(err, diffID); errors.Is(err, ReadErr{}) { + return "", err } return "", errors.Wrapf(err, "retrieving layer with SHA '%s'", diffID) } @@ -231,3 +226,33 @@ func (c *VolumeCache) setupStagingDir() error { } return os.MkdirAll(c.stagingDir, 0777) } + +// VerifyLayer returns an error if the layer contents do not match the provided sha. 
+func (c *VolumeCache) VerifyLayer(diffID string) error { + layerRC, err := c.RetrieveLayer(diffID) + if err != nil { + return err + } + defer func() { + _ = layerRC.Close() + }() + hasher := sha256.New() + if _, err := io.Copy(hasher, layerRC); err != nil { + return errors.Wrap(err, "hashing layer") + } + foundDiffID := fmt.Sprintf("sha256:%x", hasher.Sum(nil)) + if diffID != foundDiffID { + return NewReadErr(fmt.Sprintf("expected layer contents to have SHA '%s'; found '%s'", diffID, foundDiffID)) + } + return err +} + +func handleFileError(err error, diffID string) error { + if os.IsNotExist(err) { + return NewReadErr(fmt.Sprintf("failed to find cache layer with SHA '%s'", diffID)) + } + if os.IsPermission(err) { + return NewReadErr(fmt.Sprintf("failed to read cache layer with SHA '%s' due to insufficient permissions", diffID)) + } + return err +} diff --git a/cmd/lifecycle/restorer.go b/cmd/lifecycle/restorer.go index d9e175b55..226bc8ff1 100644 --- a/cmd/lifecycle/restorer.go +++ b/cmd/lifecycle/restorer.go @@ -119,7 +119,7 @@ func (r *restoreCmd) Exec() error { return cmd.FailErr(err, "get digest reference for builder image") } analyzedMD.BuildImage = &files.ImageIdentifier{Reference: digestRef.String()} - cmd.DefaultLogger.Debugf("Adding build image info to analyzed metadata: ") + cmd.DefaultLogger.Debug("Adding build image info to analyzed metadata: ") cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.BuildImage)) } var ( @@ -187,11 +187,11 @@ func (r *restoreCmd) updateAnalyzedMD(analyzedMD *files.Analyzed, runImage imgut return cmd.FailErr(err, "read target data from run image") } } - cmd.DefaultLogger.Debugf("Run image info in analyzed metadata was: ") + cmd.DefaultLogger.Debug("Run image info in analyzed metadata was: ") cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.RunImage)) analyzedMD.RunImage.Reference = digestRef.String() analyzedMD.RunImage.TargetMetadata = targetData - cmd.DefaultLogger.Debugf("Run image info in analyzed metadata 
is: ") + cmd.DefaultLogger.Debug("Run image info in analyzed metadata is: ") cmd.DefaultLogger.Debug(encoding.ToJSONMaybe(analyzedMD.RunImage)) return nil } diff --git a/phase/cache.go b/phase/cache.go index 4467df0d8..d32fd81e0 100644 --- a/phase/cache.go +++ b/phase/cache.go @@ -99,15 +99,23 @@ func (e *Exporter) addOrReuseCacheLayer(cache Cache, layerDir LayerDir, previous return "", errors.Wrapf(err, "creating layer '%s'", layerDir.Identifier()) } if layer.Digest == previousSHA { - e.Logger.Infof("Reusing cache layer '%s'\n", layer.ID) - e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) - err = cache.ReuseLayer(previousSHA) - if err != nil { - isReadErr, readErr := c.IsReadErr(err) - if !isReadErr { - return "", errors.Wrapf(err, "reusing layer %s", layer.ID) + if err = cache.VerifyLayer(previousSHA); err == nil { + e.Logger.Infof("Reusing cache layer '%s'\n", layer.ID) + e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) + if err = cache.ReuseLayer(previousSHA); err != nil { + if isReadErr, readErr := c.IsReadErr(err); isReadErr { + // we shouldn't get here, as VerifyLayer would've returned an error + e.Logger.Warnf("Skipping re-use for layer %s: %s", layer.ID, readErr.Error()) + } else { + return "", errors.Wrapf(err, "reusing layer %s", layer.ID) + } + } + } else { + if isReadErr, readErr := c.IsReadErr(err); isReadErr { + e.Logger.Warnf("Skipping re-use for layer %s: %s", layer.ID, readErr.Error()) + } else { + return "", errors.Wrapf(err, "verifying layer '%s'", layerDir.Identifier()) } - e.Logger.Warnf("Skipping re-use for layer %s: %s", layer.ID, readErr.Error()) } } e.Logger.Infof("Adding cache layer '%s'\n", layer.ID) diff --git a/phase/exporter.go b/phase/exporter.go index 759668149..a3c44e1e1 100644 --- a/phase/exporter.go +++ b/phase/exporter.go @@ -35,6 +35,7 @@ type Cache interface { ReuseLayer(sha string) error RetrieveLayer(sha string) (io.ReadCloser, error) Commit() error + VerifyLayer(sha string) error } type Exporter
struct { diff --git a/phase/rebaser.go b/phase/rebaser.go index c96c2d0dc..947cdf251 100644 --- a/phase/rebaser.go +++ b/phase/rebaser.go @@ -202,7 +202,7 @@ func (r *Rebaser) validateTarget(appImg imgutil.Image, newBaseImg imgutil.Image) } if rebasable == "false" { if !r.Force { - return fmt.Errorf("%s; %s", msgAppImageNotMarkedRebasable, msgProvideForceToOverride) + return errors.New(msgAppImageNotMarkedRebasable + "; " + msgProvideForceToOverride) } r.Logger.Warn(msgAppImageNotMarkedRebasable) } diff --git a/phase/restorer.go b/phase/restorer.go index 901c28124..0f108f648 100644 --- a/phase/restorer.go +++ b/phase/restorer.go @@ -140,6 +140,9 @@ func (r *Restorer) restoreCacheLayer(cache Cache, sha string) error { return errors.New("restoring layer: cache not provided") } r.Logger.Debugf("Retrieving data for %q", sha) + if err := cache.VerifyLayer(sha); err != nil { + return err + } rc, err := cache.RetrieveLayer(sha) if err != nil { return err