From 89400b7410a0ae94ffac032a5461c7a129537afd Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Wed, 28 Mar 2018 13:34:00 -0700
Subject: [PATCH 1/3] Add command with unit tests

---
 pkg/commands/add.go           | 140 ++++++++++++++++++++++++++++++++++
 pkg/commands/commands.go      |   2 +
 pkg/util/command_util.go      |  45 ++++++++++-
 pkg/util/command_util_test.go |  67 ++++++++++++++++
 pkg/util/fs_util.go           |  37 +++++++++
 pkg/util/tar_util.go          |  86 +++++++++++++++++++++
 pkg/util/tar_util_test.go     | 108 ++++++++++++++++++++++++++
 7 files changed, 484 insertions(+), 1 deletion(-)
 create mode 100644 pkg/commands/add.go
 create mode 100644 pkg/util/tar_util_test.go

diff --git a/pkg/commands/add.go b/pkg/commands/add.go
new file mode 100644
index 0000000000..524018c72b
--- /dev/null
+++ b/pkg/commands/add.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package commands
+
+import (
+	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
+	"github.com/containers/image/manifest"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/sirupsen/logrus"
+	"path/filepath"
+	"strings"
+)
+
+type AddCommand struct {
+	cmd           *instructions.AddCommand
+	buildcontext  string
+	snapshotFiles []string
+}
+
+// ExecuteCommand executes the ADD command
+// Special stuff about ADD:
+//	1. If <src> is a remote file URL:
+//		- destination will have permissions of 0600
+//		- If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
+//		- If <dest> doesn't end with a slash, the destination filepath is inferred to be <dest>/<filename>
+//	2. If <src> is a local tar archive:
+//		- it is unpacked at the dest, as 'tar -x' would
+func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error {
+	srcs := a.cmd.SourcesAndDest[:len(a.cmd.SourcesAndDest)-1]
+	dest := a.cmd.SourcesAndDest[len(a.cmd.SourcesAndDest)-1]
+
+	logrus.Infof("cmd: Add %s", srcs)
+	logrus.Infof("dest: %s", dest)
+
+	// First, resolve any environment replacement
+	resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, config.Env, true)
+	if err != nil {
+		return err
+	}
+	dest = resolvedEnvs[len(resolvedEnvs)-1]
+	// Get a map of [src]:[files rooted at src]
+	srcMap, err := util.ResolveSources(resolvedEnvs, a.buildcontext)
+	if err != nil {
+		return err
+	}
+	// If any of the sources are local tar archives:
+	// 1. Unpack them to the specified destination
+	// 2. Remove it as a source that needs to be copied over
+	// If any of the sources is a remote file URL:
+	// 1. Download and copy it to the specified dest
+	// 2. Remove it as a source that needs to be copied
+	for src, files := range srcMap {
+		for _, file := range files {
+			// If file is a local tar archive, then we unpack it to dest
+			filePath := filepath.Join(a.buildcontext, file)
+			isFilenameSource, err := isFilenameSource(srcMap, file)
+			if err != nil {
+				return err
+			}
+			if util.IsSrcRemoteFileURL(file) {
+				urlDest := util.URLDestinationFilepath(file, dest, config.WorkingDir)
+				logrus.Infof("Adding remote URL %s to %s", file, urlDest)
+				if err := util.DownloadFileToDest(file, urlDest); err != nil {
+					return err
+				}
+				a.snapshotFiles = append(a.snapshotFiles, urlDest)
+				delete(srcMap, src)
+			} else if isFilenameSource && util.IsFileLocalTarArchive(filePath) {
+				logrus.Infof("Unpacking local tar archive %s to %s", file, dest)
+				if err := util.UnpackLocalTarArchive(filePath, dest); err != nil {
+					return err
+				}
+				// Add the unpacked files to the snapshotter
+				filesAdded, err := util.Files(dest)
+				if err != nil {
+					return err
+				}
+				logrus.Debugf("Added %v from local tar archive %s", filesAdded, file)
+				a.snapshotFiles = append(a.snapshotFiles, filesAdded...)
+				delete(srcMap, src)
+			}
+		}
+	}
+	// With the remaining "normal" sources, create and execute a standard copy command
+	if len(srcMap) == 0 {
+		return nil
+	}
+	var regularSrcs []string
+	for src := range srcMap {
+		regularSrcs = append(regularSrcs, src)
+	}
+	copyCmd := CopyCommand{
+		cmd: &instructions.CopyCommand{
+			SourcesAndDest: append(regularSrcs, dest),
+		},
+		buildcontext: a.buildcontext,
+	}
+	if err := copyCmd.ExecuteCommand(config); err != nil {
+		return err
+	}
+	a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...)
+	return nil
+}
+
+func isFilenameSource(srcMap map[string][]string, fileName string) (bool, error) {
+	for src := range srcMap {
+		matched, err := filepath.Match(src, fileName)
+		if err != nil {
+			return false, err
+		}
+		if matched || (src == fileName) {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// FilesToSnapshot should return an empty array if still nil; no files were changed
+func (a *AddCommand) FilesToSnapshot() []string {
+	return a.snapshotFiles
+}
+
+// CreatedBy returns some information about the command for the image config
+func (a *AddCommand) CreatedBy() string {
+	return strings.Join(a.cmd.SourcesAndDest, " ")
+}
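The ordering ExecuteCommand applies above — remote file URLs are downloaded, local tar archives are unpacked, and whatever is left falls through to a regular copy — can be sketched stand-alone. The snippet below is illustrative only; it is not part of this patch and uses a naive suffix check where the patch actually sniffs file contents:

    package main

    import (
    	"fmt"
    	"net/url"
    	"strings"
    )

    // classify mirrors the branch order in AddCommand.ExecuteCommand:
    // remote URL -> download, local tar archive -> unpack, anything else -> copy.
    func classify(src string) string {
    	if u, err := url.ParseRequestURI(src); err == nil && strings.HasPrefix(u.Scheme, "http") {
    		return "download"
    	}
    	// The patch detects archives by reading the file; a suffix check is a
    	// simplification for this sketch.
    	if strings.HasSuffix(src, ".tar") || strings.HasSuffix(src, ".tar.gz") {
    		return "unpack"
    	}
    	return "copy"
    }

    func main() {
    	for _, src := range []string{"https://example.com/app.tar.gz", "archive.tar", "main.go"} {
    		fmt.Printf("%-34s -> %s\n", src, classify(src))
    	}
    }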
diff --git a/pkg/commands/commands.go b/pkg/commands/commands.go
index 08fa3ceb17..5f0b6aedf3 100644
--- a/pkg/commands/commands.go
+++ b/pkg/commands/commands.go
@@ -44,6 +44,8 @@ func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, error) {
 		return &ExposeCommand{cmd: c}, nil
 	case *instructions.EnvCommand:
 		return &EnvCommand{cmd: c}, nil
+	case *instructions.AddCommand:
+		return &AddCommand{cmd: c, buildcontext: buildcontext}, nil
 	case *instructions.CmdCommand:
 		return &CmdCommand{cmd: c}, nil
 	case *instructions.EntrypointCommand:
diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go
index 0f6472f79a..7ecb050e3d 100644
--- a/pkg/util/command_util.go
+++ b/pkg/util/command_util.go
@@ -22,6 +22,8 @@ import (
 	"github.com/docker/docker/builder/dockerfile/shell"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strings"
@@ -31,6 +33,10 @@ func ResolveEnvironmentReplacementList(values, envs []string, isFilepath bool) ([]string, error) {
 	var resolvedValues []string
 	for _, value := range values {
+		if IsSrcRemoteFileURL(value) {
+			resolvedValues = append(resolvedValues, value)
+			continue
+		}
 		resolved, err := ResolveEnvironmentReplacement(value, envs, isFilepath)
 		logrus.Debugf("Resolved %s to %s", value, resolved)
 		if err != nil {
@@ -106,13 +112,17 @@ func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) (map[string][]string, error) {
 func matchSources(srcs, files []string) ([]string, error) {
 	var matchedSources []string
 	for _, src := range srcs {
+		if IsSrcRemoteFileURL(src) {
+			matchedSources = append(matchedSources, src)
+			continue
+		}
 		src = filepath.Clean(src)
 		for _, file := range files {
 			matched, err := filepath.Match(src, file)
 			if err != nil {
 				return nil, err
 			}
-			if matched {
+			if matched || src == file {
 				matchedSources = append(matchedSources, file)
 			}
 		}
@@ -160,10 +170,31 @@ func DestinationFilepath(filename, srcName, dest, cwd, buildcontext string) (string, error) {
 	return filepath.Join(cwd, dest), nil
 }
 
+// URLDestinationFilepath gives the destination filepath that a file from a remote URL should be saved to
+func URLDestinationFilepath(rawurl, dest, cwd string) string {
+	if !IsDestDir(dest) {
+		if !filepath.IsAbs(dest) {
+			return filepath.Join(cwd, dest)
+		}
+		return dest
+	}
+	urlBase := filepath.Base(rawurl)
+	destPath := filepath.Join(dest, urlBase)
+
+	if !filepath.IsAbs(dest) {
+		destPath = filepath.Join(cwd, destPath)
+	}
+	return destPath
+}
+
 // SourcesToFilesMap returns a map of [src]:[files rooted at source]
 func SourcesToFilesMap(srcs []string, root string) (map[string][]string, error) {
 	srcMap := make(map[string][]string)
 	for _, src := range srcs {
+		if IsSrcRemoteFileURL(src) {
+			srcMap[src] = []string{src}
+			continue
+		}
 		src = filepath.Clean(src)
 		files, err := RelativeFiles(src, root)
 		if err != nil {
@@ -202,3 +233,15 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, srcMap map[string][]string) error {
 	}
 	return nil
 }
+
+func IsSrcRemoteFileURL(rawurl string) bool {
+	_, err := url.ParseRequestURI(rawurl)
+	if err != nil {
+		return false
+	}
+	_, err = http.Get(rawurl)
+	if err != nil {
+		return false
+	}
+	return true
+}
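One thing to note about IsSrcRemoteFileURL above: it validates a candidate source by actually issuing an HTTP GET, so classification (and the tests below that exercise it) need network access. Purely as an illustrative alternative — not what this patch does — the same check could be done with a HEAD request so no body is transferred:

    package main

    import (
    	"fmt"
    	"net/http"
    	"net/url"
    )

    // isRemoteFileURL parses the candidate URL and then sends a HEAD request.
    // Hypothetical variant for illustration; the patch's IsSrcRemoteFileURL
    // uses url.ParseRequestURI followed by http.Get.
    func isRemoteFileURL(rawurl string) bool {
    	if _, err := url.ParseRequestURI(rawurl); err != nil {
    		return false
    	}
    	resp, err := http.Head(rawurl)
    	if err != nil {
    		return false
    	}
    	resp.Body.Close()
    	return resp.StatusCode < 400
    }

    func main() {
    	fmt.Println(isRemoteFileURL("https://example.com/")) // true, given network access
    	fmt.Println(isRemoteFileURL("not/real/"))            // false
    }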
diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go
index 611ad7b81d..afd359d302 100644
--- a/pkg/util/command_util_test.go
+++ b/pkg/util/command_util_test.go
@@ -22,6 +22,8 @@ import (
 	"testing"
 )
 
+var testUrl = "https://github.com/GoogleCloudPlatform/runtimes-common/blob/master/LICENSE"
+
 var testEnvReplacement = []struct {
 	path    string
 	command string
@@ -207,6 +209,39 @@ func Test_DestinationFilepath(t *testing.T) {
 	}
 }
 
+var urlDestFilepathTests = []struct {
+	url          string
+	cwd          string
+	dest         string
+	expectedDest string
+}{
+	{
+		url:          "https://something/something",
+		cwd:          "/test",
+		dest:         ".",
+		expectedDest: "/test/something",
+	},
+	{
+		url:          "https://something/something",
+		cwd:          "/cwd",
+		dest:         "/test",
+		expectedDest: "/test",
+	},
+	{
+		url:          "https://something/something",
+		cwd:          "/test",
+		dest:         "/dest/",
+		expectedDest: "/dest/something",
+	},
+}
+
+func Test_UrlDestFilepath(t *testing.T) {
+	for _, test := range urlDestFilepathTests {
+		actualDest := URLDestinationFilepath(test.url, test.dest, test.cwd)
+		testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedDest, actualDest)
+	}
+}
+
 var matchSourcesTests = []struct {
 	srcs          []string
 	files         []string
 	expectedFiles []string
 }{
 	{
 		srcs: []string{
 			"pkg/*",
+			testUrl,
 		},
 		files: []string{
 			"pkg/a",
 			"pkg/b",
 			"/pkg/d",
 			"pkg/b/d/",
 			"dir/",
 			"dir/a",
 		},
 		expectedFiles: []string{
 			"pkg/a",
 			"pkg/b",
+			testUrl,
 		},
 	},
 }
@@ -380,6 +417,7 @@ var testResolveSources = []struct {
 		srcsAndDest: []string{
 			"context/foo",
 			"context/b*",
+			testUrl,
 			"dest/",
 		},
 		expectedMap: map[string][]string{
@@ -393,6 +431,9 @@ var testResolveSources = []struct {
 			"context/bar/bat",
 			"context/bar/baz",
 		},
+		testUrl: {
+			testUrl,
+		},
 	},
 },
 }
@@ -403,3 +444,29 @@ func Test_ResolveSources(t *testing.T) {
 		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedMap, actualMap)
 	}
 }
+
+var testRemoteUrls = []struct {
+	url   string
+	valid bool
+}{
+	{
+		url:   testUrl,
+		valid: true,
+	},
+	{
+		url:   "not/real/",
+		valid: false,
+	},
+	{
+		url:   "https://url.com/something/not/real",
+		valid: false,
+	},
+}
+
+func Test_RemoteUrls(t *testing.T) {
+	for _, test := range testRemoteUrls {
+		valid := IsSrcRemoteFileURL(test.url)
+		testutil.CheckErrorAndDeepEqual(t, false, nil, test.valid, valid)
+	}
+
+}
diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go
index c0ba9008ef..b3b020c15f 100644
--- a/pkg/util/fs_util.go
+++ b/pkg/util/fs_util.go
@@ -23,9 +23,11 @@ import (
 	"github.com/containers/image/docker"
 	"github.com/sirupsen/logrus"
 	"io"
+	"net/http"
 	"os"
 	"path/filepath"
 	"strings"
+	"time"
 )
 
 var whitelist = []string{"/kbuild"}
@@ -117,6 +119,17 @@ func RelativeFiles(fp string, root string) ([]string, error) {
 	return files, err
 }
 
+// Files returns a list of all files rooted at root
+func Files(root string) ([]string, error) {
+	var files []string
+	logrus.Debugf("Getting files and contents at root %s", root)
+	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		files = append(files, path)
+		return err
+	})
+	return files, err
+}
+
 // FilepathExists returns true if the path exists
 func FilepathExists(path string) bool {
 	_, err := os.Stat(path)
@@ -142,3 +155,27 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode) error {
 	}
 	return dest.Chmod(perm)
 }
+
+// DownloadFileToDest downloads the file at rawurl to the given dest for the ADD command
+// From add command docs:
+// 1. If <src> is a remote file URL:
+//    - destination will have permissions of 0600
+//    - If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
+func DownloadFileToDest(rawurl, dest string) error {
+	resp, err := http.Get(rawurl)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if err := CreateFile(dest, resp.Body, 0600); err != nil {
+		return err
+	}
+	mTime := time.Time{}
+	lastMod := resp.Header.Get("Last-Modified")
+	if lastMod != "" {
+		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
+			mTime = parsedMTime
+		}
+	}
+	return os.Chtimes(dest, mTime, mTime)
+}
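DownloadFileToDest above relies on http.ParseTime for the Last-Modified header; that stdlib helper tries the three date formats HTTP/1.1 permits (RFC 1123, RFC 850, ANSI C asctime), and when the header is absent the file's mtime is simply left at the zero time. A tiny stand-alone illustration of the parsing step, not part of the patch:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// http.ParseTime tries each HTTP/1.1 time format in turn and returns
    	// an error if none of them match.
    	for _, v := range []string{"Wed, 28 Mar 2018 13:34:00 GMT", "not a date"} {
    		t, err := http.ParseTime(v)
    		fmt.Println(t, err)
    	}
    }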
diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go
index 417f16f042..8c0ffdfb91 100644
--- a/pkg/util/tar_util.go
+++ b/pkg/util/tar_util.go
@@ -18,8 +18,14 @@ package util
 import (
 	"archive/tar"
+	"compress/bzip2"
+	"compress/gzip"
+	pkgutil "github.com/GoogleCloudPlatform/container-diff/pkg/util"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"io"
+	"io/ioutil"
 	"os"
 	"syscall"
 )
@@ -86,3 +92,83 @@ func checkHardlink(p string, i os.FileInfo) (bool, string) {
 	}
 	return hardlink, linkDst
 }
+
+// UnpackLocalTarArchive unpacks the tar archive at path to the directory dest,
+// and returns an error if the path does not point to a local tar archive
+func UnpackLocalTarArchive(path, dest string) error {
+	// First, we need to check if the path is a local tar archive
+	if compressed, compressionLevel := fileIsCompressedTar(path); compressed {
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer file.Close()
+		if compressionLevel == archive.Gzip {
+			gzr, err := gzip.NewReader(file)
+			if err != nil {
+				return err
+			}
+			defer gzr.Close()
+			return pkgutil.UnTar(gzr, dest, nil)
+		} else if compressionLevel == archive.Bzip2 {
+			bzr := bzip2.NewReader(file)
+			return pkgutil.UnTar(bzr, dest, nil)
+		}
+	}
+	if fileIsUncompressedTar(path) {
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer file.Close()
+		return pkgutil.UnTar(file, dest, nil)
+	}
+	return errors.New("path does not lead to local tar archive")
+}
+
+// IsFileLocalTarArchive returns true if the file is a local tar archive
+func IsFileLocalTarArchive(src string) bool {
+	compressed, _ := fileIsCompressedTar(src)
+	uncompressed := fileIsUncompressedTar(src)
+	return compressed || uncompressed
+}
+
+func fileIsCompressedTar(src string) (bool, archive.Compression) {
+	r, err := os.Open(src)
+	if err != nil {
+		return false, -1
+	}
+	defer r.Close()
+	buf, err := ioutil.ReadAll(r)
+	if err != nil {
+		return false, -1
+	}
+	compressionLevel := archive.DetectCompression(buf)
+	return (compressionLevel > 0), compressionLevel
+}
+
+func fileIsUncompressedTar(src string) bool {
+	r, err := os.Open(src)
+	defer r.Close()
+	if err != nil {
+		return false
+	}
+	fi, err := os.Stat(src)
+	if err != nil {
+		return false
+	}
+	if fi.Size() == 0 {
+		return false
+	}
+	tr := tar.NewReader(r)
+	if tr == nil {
+		return false
+	}
+	for {
+		_, err := tr.Next()
+		if err != nil {
+			return false
+		}
+		return true
+	}
+}
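fileIsCompressedTar above defers to Docker's archive.DetectCompression, which decides the format from the magic bytes at the start of the data (the vendored implementation appears later in this series), even though the helper reads the whole file into memory when a short prefix would suffice. A stand-alone sketch of the same magic-byte check, for illustration only:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // detect mirrors the magic numbers archive.DetectCompression looks for.
    func detect(prefix []byte) string {
    	switch {
    	case bytes.HasPrefix(prefix, []byte{0x1F, 0x8B, 0x08}):
    		return "gzip"
    	case bytes.HasPrefix(prefix, []byte{0x42, 0x5A, 0x68}):
    		return "bzip2"
    	case bytes.HasPrefix(prefix, []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}):
    		return "xz"
    	default:
    		return "uncompressed"
    	}
    }

    func main() {
    	fmt.Println(detect([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
    	fmt.Println(detect([]byte("ustar")))                // uncompressed (plain tar, or not an archive at all)
    }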
diff --git a/pkg/util/tar_util_test.go b/pkg/util/tar_util_test.go
new file mode 100644
index 0000000000..9974f6c217
--- /dev/null
+++ b/pkg/util/tar_util_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "archive/tar" + "compress/gzip" + "github.com/GoogleCloudPlatform/k8s-container-builder/testutil" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +var regularFiles = []string{"file", "file.tar", "file.tar.gz"} +var uncompressedTars = []string{"uncompressed", "uncompressed.tar"} +var compressedTars = []string{"compressed", "compressed.tar.gz"} + +func Test_IsLocalTarArchive(t *testing.T) { + testDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("err setting up temp dir: %v", err) + } + defer os.RemoveAll(testDir) + if err := setUpFilesAndTars(testDir); err != nil { + t.Fatal(err) + } + // Test we get the correct result for regular files + for _, regularFile := range regularFiles { + isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, regularFile)) + testutil.CheckErrorAndDeepEqual(t, false, nil, false, isTarArchive) + } + // Test we get the correct result for uncompressed tars + for _, uncompressedTar := range uncompressedTars { + isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, uncompressedTar)) + testutil.CheckErrorAndDeepEqual(t, false, nil, true, isTarArchive) + } + // Test we get the correct result for compressed tars + for _, compressedTar := range compressedTars { + isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, compressedTar)) + testutil.CheckErrorAndDeepEqual(t, false, nil, true, isTarArchive) + } +} + +func setUpFilesAndTars(testDir string) error { + regularFilesAndContents := map[string]string{ + regularFiles[0]: "", + regularFiles[1]: "something", + regularFiles[2]: "here", + } + if err := testutil.SetupFiles(testDir, regularFilesAndContents); err != nil { + return err + } + + for _, uncompressedTar := range uncompressedTars { + tarFile, err := os.Create(filepath.Join(testDir, uncompressedTar)) + if err != nil { + return err + } + if err := createTar(testDir, tarFile); err != nil { + return err + } + } + + for _, compressedTar := range compressedTars { + tarFile, err := os.Create(filepath.Join(testDir, compressedTar)) + if err != nil { + return err + } + gzr := gzip.NewWriter(tarFile) + if err := createTar(testDir, gzr); err != nil { + return err + } + } + return nil +} + +func createTar(testdir string, writer io.Writer) error { + + w := tar.NewWriter(writer) + defer w.Close() + for _, regFile := range regularFiles { + filePath := filepath.Join(testdir, regFile) + fi, err := os.Stat(filePath) + if err != nil { + return err + } + if err := AddToTar(filePath, fi, w); err != nil { + return err + } + } + return nil +} From a6b971f8e65a14e2fb2aaffdfb7cb669efad33b7 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 28 Mar 2018 13:34:54 -0700 Subject: [PATCH 2/3] dep ensure; updated vendor --- Gopkg.lock | 7 +- .../docker/docker/pkg/archive/archive.go | 1281 +++++++++++++++++ .../docker/pkg/archive/archive_linux.go | 92 ++ .../docker/pkg/archive/archive_other.go | 7 + .../docker/docker/pkg/archive/archive_unix.go | 114 ++ .../docker/pkg/archive/archive_windows.go | 77 + .../docker/docker/pkg/archive/changes.go | 441 ++++++ .../docker/pkg/archive/changes_linux.go | 313 ++++ 
.../docker/pkg/archive/changes_other.go | 97 ++ .../docker/docker/pkg/archive/changes_unix.go | 37 + .../docker/pkg/archive/changes_windows.go | 30 + .../docker/docker/pkg/archive/copy.go | 472 ++++++ .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 256 ++++ .../docker/pkg/archive/example_changes.go | 97 ++ .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/fileutils/fileutils.go | 298 ++++ .../docker/pkg/fileutils/fileutils_darwin.go | 27 + .../docker/pkg/fileutils/fileutils_unix.go | 22 + .../docker/pkg/fileutils/fileutils_windows.go | 7 + .../docker/docker/pkg/ioutils/buffer.go | 51 + .../docker/docker/pkg/ioutils/bytespipe.go | 186 +++ .../docker/docker/pkg/ioutils/fswriters.go | 162 +++ .../docker/docker/pkg/ioutils/readers.go | 158 ++ .../docker/docker/pkg/ioutils/temp_unix.go | 10 + .../docker/docker/pkg/ioutils/temp_windows.go | 16 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 ++ .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/docker/pkg/pools/pools.go | 137 ++ 34 files changed, 4712 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create 
mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go diff --git a/Gopkg.lock b/Gopkg.lock index d6be583a7a..e6ed112e7e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -163,9 +163,14 @@ "builder/dockerfile/parser", "builder/dockerfile/shell", "client", + "pkg/archive", + "pkg/fileutils", "pkg/homedir", "pkg/idtools", + "pkg/ioutils", + "pkg/longpath", "pkg/mount", + "pkg/pools", "pkg/system" ] revision = "b1a1234c60cf87048814aa37da523b03a7b0d344" @@ -487,6 +492,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "eadec1feacc8473e54622d5f3a25fbc9c7fb1f9bd38776475c3e2d283bd80d2a" + inputs-digest = "061d7ecaab8da6e0a543987bf870b813d2f8c88481c6cdffec33374150b66c98" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 0000000000..e8f50869d9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1281 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +var unpigzPath string + +func init() { + if path, err := exec.LookPath("unpigz"); err != nil { + logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") + } else { + logrus.Debugf("Using unpigz binary found at path %s", path) + unpigzPath = path + } +} + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. 
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappingsVar *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. 
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && + !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && + !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. + // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. + if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. 
However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. 
+ if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. 
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." 
{ + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMappings, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappingsVar.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IDMappings returns the IDMappings of the archiver. +func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + }() + + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. 
The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 0000000000..970d4d0680 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,92 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := 
unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 0000000000..462dfc6323 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 0000000000..e81076c170 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,114 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 0000000000..e75bfc9b62 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,77 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. 
Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{0, 0}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 0000000000..43734db5b1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,441 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. 
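+// For example, a modified /etc/hosts is reported as
+// Change{Path: "/etc/hosts", Kind: ChangeModify} and prints as "C /etc/hosts".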
+type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. 
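+// Deletions are written as zero-size AUFS-style whiteout entries (the basename
+// prefixed with WhiteoutPrefix, so a deleted /tmp/foo becomes tmp/.wh.foo),
+// while added and modified paths are read back from dir and streamed in full.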
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 0000000000..78a5393c8e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,313 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. 
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. 
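+	// Names whose inode matches on the same device are dropped from the merged
+	// list here, so the pruned subtrees are never lstat'ed or recursed into.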
+ var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." 
{ // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 0000000000..ba744741cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 0000000000..c06a209d8e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,37 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 0000000000..6555c01368 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 0000000000..d0f13ca79b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,472 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. 
+var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". 
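+// The renaming itself is carried out by TarWithOptions using the RebaseNames
+// mapping built by TarResourceRebaseOpts below.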
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + opts := TarResourceRebaseOpts(sourceBase, rebaseName) + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + return TarWithOptions(sourceDir, opts) +} + +// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase +// parameters to be sent to TarWithOptions (the TarOptions struct) +func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { + filter := []string{sourceBase} + return &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + } +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. 
+ // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, ioutil.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. 
+ if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// TODO @gupta-ak. These might have to be changed in the future to be +// continuity driver aware as well to support LCOW. + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. 
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 0000000000..3958364f5b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 0000000000..a878d1bac4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 0000000000..d0cff98ffc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,256 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600, "") + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 0000000000..495db809e9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 0000000000..58aefe3efb --- /dev/null +++ 
b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = ((1 << 30) - 2) + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 0000000000..f58bf227fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 0000000000..4c072a87ee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 0000000000..85435694cf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
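+	// parseStringPairs (defined below) groups the variadic input into
+	// (name, content) pairs; a trailing name without content becomes an
+	// empty file, matching the example in the doc comment above.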
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 0000000000..88e0f2834b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths agaist a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. 
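+			// e.g. the pattern "docs" should also match "docs/README.md",
+			// so compare it against the same number of leading parent-path
+			// components as the pattern has.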
+ if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. 
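+// If src and dst clean to the same path, CopyFile is a no-op and returns
+// (0, nil).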
+func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. +func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 0000000000..e40cc271b3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000000..565396f1c7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. 
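+// It returns -1 if /proc/<pid>/fd cannot be read.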
+func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000000..3f1ebb6567 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils // import "github.com/docker/docker/pkg/fileutils" + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000000..466f79294b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000000..d4bbf3c9dc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,186 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). 
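+// Note: the constructor currently takes no arguments; the pipe starts with a
+// single buffer of capacity minCap taken from the buffer pool.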
+func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + bp.mu.Unlock() + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 0000000000..534d66ac26 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. 
+type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000000..72f7f2319f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,158 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "crypto/sha256" + "encoding/hex" + "io" + + "golang.org/x/net/context" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. 
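+// The returned string is prefixed with "sha256:".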
+func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000000..dc894f9131 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000000..ecaba2e36d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,16 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. 
+func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000000..91b8d18266 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 0000000000..61c679497d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. 
+func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000000..4177affba2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 0000000000..46339c282f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. 
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. +func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. 
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} From 56519c913d283c8ff9a91d1f19836f16fa0ac266 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 28 Mar 2018 13:35:13 -0700 Subject: [PATCH 3/3] Add cmd integration test --- integration_tests/context/tars/file | 1 + integration_tests/context/tars/file.bz2 | Bin 0 -> 196 bytes integration_tests/context/tars/file.tar | Bin 0 -> 10240 bytes integration_tests/context/tars/file.tar.gz | Bin 0 -> 199 bytes .../dockerfiles/Dockerfile_test_add | 18 ++++++++++++++++++ .../dockerfiles/config_test_add.json | 12 ++++++++++++ integration_tests/integration_test_yaml.go | 7 +++++++ 7 files changed, 38 insertions(+) create mode 100644 integration_tests/context/tars/file create mode 100644 integration_tests/context/tars/file.bz2 create mode 100644 integration_tests/context/tars/file.tar create mode 100644 integration_tests/context/tars/file.tar.gz create mode 100644 integration_tests/dockerfiles/Dockerfile_test_add create mode 100644 integration_tests/dockerfiles/config_test_add.json diff --git a/integration_tests/context/tars/file b/integration_tests/context/tars/file new file mode 100644 index 0000000000..faddca8a8d --- /dev/null +++ b/integration_tests/context/tars/file @@ -0,0 +1 @@ +normal file diff --git a/integration_tests/context/tars/file.bz2 b/integration_tests/context/tars/file.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..53c13149a1a2a8e4f309d9c4f1920a8eec06f199 GIT binary patch literal 196 zcmV;#06YIeT4*^jL0KkKSt*-N;{X6?e}uu1AP7JK|9}Yr00DO9-mn0G00=MuyIK;c z&;vjKXbl5E(x0jxpa1{>01`+>Q_-k;fB*n`n+tHlwC-Gx6Os}}a;B#-Drt1cg+>ww z5ENu?2%qXhZUC2F;7J-Fk}QfOwL*!5DvT*ARKsiI0^yuwVp3_uc-MmuffC&yAA{-a yu&Z;>@rr;*w+Wj)X`MKHJ3nP)!rB-ho|uFQJuF^gP_c-Re;0B^I8cx&n@;0WB=1||~uvs?kcOkxS zv7d&`1LsWU#q_-P-4L5`Z0bw_M$&mlY>P2!~DCNHjU@vpR?Uf_Rv%e zJm#{iR;*EAYsNy|s`*dP9QgC#EO+GGS*-6}rz`FdaFLYXv$6lP`o(J%>$iL?v%4N9 ztvfTme%1+#n50uiFN2GFgV*>4*Zj`gm!G^#F4+8A^qX5AaWcEM`F}OIa&@2O_3S@| w{4rMFckbWjUH|ymzokKLMq7`~2(Q0azjFWYuObY{AbuBP=Ei7Q1`P%V06MT)5C8xG literal 0 HcmV?d00001 diff --git a/integration_tests/dockerfiles/Dockerfile_test_add b/integration_tests/dockerfiles/Dockerfile_test_add new file mode 100644 index 0000000000..6fb9d73a7c --- /dev/null +++ b/integration_tests/dockerfiles/Dockerfile_test_add @@ -0,0 +1,18 @@ +FROM gcr.io/google-appengine/debian9:latest +# First, try adding some regular files +ADD context/foo foo +ADD context/foo /foodir/ +ADD context/bar/b* bar/ +ADD . 
newdir +ADD ["context/foo", "/tmp/foo" ] + +# Next, make sure environment replacement works +ENV contextenv ./context +ADD $contextenv/* /tmp/${contextenv}/ + +# Now, test extracting local tar archives +ADD context/tars/fil* /tars/ +ADD context/tars/file.tar /tars_again + +# Finally, test adding a remote URL, concurrently with a normal file +ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/ diff --git a/integration_tests/dockerfiles/config_test_add.json b/integration_tests/dockerfiles/config_test_add.json new file mode 100644 index 0000000000..b6d7a2020e --- /dev/null +++ b/integration_tests/dockerfiles/config_test_add.json @@ -0,0 +1,12 @@ +[ + { + "Image1": "gcr.io/kbuild-test/docker-test-add:latest", + "Image2": "gcr.io/kbuild-test/kbuild-test-add:latest", + "DiffType": "File", + "Diff": { + "Adds": null, + "Dels": null, + "Mods": null + } + } +] \ No newline at end of file diff --git a/integration_tests/integration_test_yaml.go b/integration_tests/integration_test_yaml.go index 3129456ff6..abfc03db8c 100644 --- a/integration_tests/integration_test_yaml.go +++ b/integration_tests/integration_test_yaml.go @@ -57,6 +57,13 @@ var fileTests = []struct { context: "/workspace/integration_tests/", repo: "test-copy", }, + { + description: "test add", + dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_add", + configPath: "/workspace/integration_tests/dockerfiles/config_test_add.json", + context: "/workspace/integration_tests/", + repo: "test-add", + }, } var structureTests = []struct {