diff --git a/cli/command/commands/commands.go b/cli/command/commands/commands.go
index c6fc8d6ace78..285daca44d25 100644
--- a/cli/command/commands/commands.go
+++ b/cli/command/commands/commands.go
@@ -8,6 +8,7 @@ import (
 	"github.com/docker/cli/cli/command/config"
 	"github.com/docker/cli/cli/command/container"
 	"github.com/docker/cli/cli/command/image"
+	"github.com/docker/cli/cli/command/manifest"
 	"github.com/docker/cli/cli/command/network"
 	"github.com/docker/cli/cli/command/node"
 	"github.com/docker/cli/cli/command/plugin"
@@ -38,12 +39,15 @@ func AddCommands(cmd *cobra.Command, dockerCli *command.DockerCli) {
 		image.NewImageCommand(dockerCli),
 		image.NewBuildCommand(dockerCli),
 
-		// node
-		node.NewNodeCommand(dockerCli),
+		// manifest
+		manifest.NewManifestCommand(dockerCli),
 
 		// network
 		network.NewNetworkCommand(dockerCli),
 
+		// node
+		node.NewNodeCommand(dockerCli),
+
 		// plugin
 		plugin.NewPluginCommand(dockerCli),
diff --git a/cli/command/manifest/annotate.go b/cli/command/manifest/annotate.go
new file mode 100644
index 000000000000..60f89b574385
--- /dev/null
+++ b/cli/command/manifest/annotate.go
@@ -0,0 +1,127 @@
+package manifest
+
+import (
+	"fmt"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/distribution/reference"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/spf13/cobra"
+)
+
+type annotateOptions struct {
+	target      string // the target manifest list name (also transaction ID)
+	image       string // the manifest to annotate within the list
+	variant     string // an architecture variant
+	os          string
+	arch        string
+	cpuFeatures []string
+	osFeatures  []string
+}
+
+// newAnnotateCommand creates a new `docker manifest annotate` command
+func newAnnotateCommand(dockerCli *command.DockerCli) *cobra.Command {
+	var opts annotateOptions
+
+	cmd := &cobra.Command{
+		Use:   "annotate LIST[:TAG] IMAGE[:TAG] [OPTIONS]",
+		Short: "Add additional information to an image's manifest",
+		Args:  cli.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			opts.target = args[0]
+			opts.image = args[1]
+			return runManifestAnnotate(dockerCli, opts)
+		},
+	}
+
+	flags := cmd.Flags()
+
+	flags.StringVar(&opts.os, "os", "", "Add OS info to a manifest before pushing it")
+	flags.StringVar(&opts.arch, "arch", "", "Add architecture info to a manifest before pushing it")
+	flags.StringSliceVar(&opts.cpuFeatures, "cpuFeatures", []string{}, "Add CPU feature info to a manifest before pushing it")
+	flags.StringSliceVar(&opts.osFeatures, "osFeatures", []string{}, "Add OS feature info to a manifest before pushing it")
+	flags.StringVar(&opts.variant, "variant", "", "Add an architecture variant to a manifest before pushing it")
+
+	return cmd
+}
+
+func runManifestAnnotate(dockerCli *command.DockerCli, opts annotateOptions) error {
+	// Make sure the manifests are pulled, find the file you need, unmarshal the JSON, edit the file, and done.
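+	// For illustration, a typical invocation looks like this (image names are hypothetical):
+	//
+	//   docker manifest annotate myuser/mylist:v1 myuser/myimage-arm:v1 --os linux --arch arm --variant v7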
+	targetRef, err := reference.ParseNormalizedNamed(opts.target)
+	if err != nil {
+		return fmt.Errorf("Annotate: Error parsing name for manifest list (%s): %s", opts.target, err)
+	}
+	imgRef, err := reference.ParseNormalizedNamed(opts.image)
+	if err != nil {
+		return fmt.Errorf("Annotate: Error parsing name for manifest (%s): %s", opts.image, err)
+	}
+
+	// Make sure we've got tags or digests:
+	if _, isDigested := targetRef.(reference.Canonical); !isDigested {
+		targetRef = reference.TagNameOnly(targetRef)
+	}
+	if _, isDigested := imgRef.(reference.Canonical); !isDigested {
+		imgRef = reference.TagNameOnly(imgRef)
+	}
+	transactionID := makeFilesafeName(targetRef.String())
+	imgID := makeFilesafeName(imgRef.String())
+	logrus.Debugf("Beginning annotate for %s/%s", transactionID, imgID)
+
+	imgInspect, _, err := getImageData(dockerCli, imgRef.String(), targetRef.String(), false)
+	if err != nil {
+		return err
+	}
+
+	if len(imgInspect) > 1 {
+		return fmt.Errorf("Cannot annotate manifest list. Please pass an image (not list) name")
+	}
+
+	mf := imgInspect[0]
+
+	newMf, err := unmarshalIntoManifestInspect(imgID, transactionID)
+	if err != nil {
+		return err
+	}
+
+	// Update the manifest with the user-provided fields.
+	// Note: append to newMf's own slices so that each flag value accumulates.
+	if opts.os != "" {
+		newMf.OS = opts.os
+	}
+	if opts.arch != "" {
+		newMf.Architecture = opts.arch
+	}
+	for _, cpuFeature := range opts.cpuFeatures {
+		newMf.Features = appendIfUnique(newMf.Features, cpuFeature)
+	}
+	for _, osFeature := range opts.osFeatures {
+		newMf.OSFeatures = appendIfUnique(newMf.OSFeatures, osFeature)
+	}
+	if opts.variant != "" {
+		newMf.Variant = opts.variant
+	}
+
+	// validate os/arch input
+	if !isValidOSArch(newMf.OS, newMf.Architecture) {
+		return fmt.Errorf("Manifest entry for image has unsupported os/arch combination: %s/%s", opts.os, opts.arch)
+	}
+	// @TODO: dgst := digest.FromBytes(b) can't be used here because we don't have the canonical JSON bytes.
+
+	if err := updateMfFile(newMf, imgID, transactionID); err != nil {
+		return err
+	}
+
+	logrus.Debugf("Annotated %s with options %v", mf.RefName, opts)
+	return nil
+}
+
+func appendIfUnique(list []string, str string) []string {
+	for _, s := range list {
+		if s == str {
+			return list
+		}
+	}
+	return append(list, str)
+}
diff --git a/cli/command/manifest/cmd.go b/cli/command/manifest/cmd.go
new file mode 100644
index 000000000000..7eb953c9a0a6
--- /dev/null
+++ b/cli/command/manifest/cmd.go
@@ -0,0 +1,44 @@
+package manifest
+
+import (
+	"fmt"
+
+	"github.com/docker/cli/cli"
+	"github.com/docker/cli/cli/command"
+
+	"github.com/spf13/cobra"
+)
+
+// NewManifestCommand returns a cobra command for `manifest` subcommands
+func NewManifestCommand(dockerCli *command.DockerCli) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "manifest COMMAND",
+		Short: "Manage Docker image manifests and lists",
+		Long:  manifestDescription,
+		Args:  cli.NoArgs,
+		Run: func(cmd *cobra.Command, args []string) {
+			fmt.Fprint(dockerCli.Err(), "\n"+cmd.UsageString())
+		},
+	}
+	cmd.AddCommand(
+		//newListFetchCommand(dockerCli),
+		newCreateListCommand(dockerCli),
+		newInspectCommand(dockerCli),
+		newAnnotateCommand(dockerCli),
+		newPushListCommand(dockerCli),
+	)
+	return cmd
+}
+
+var manifestDescription = `
+The **docker manifest** command has subcommands for managing image manifests and
+manifest lists. A manifest list allows you to use one name to refer to the same image
+built for multiple architectures.
+
+To see help for a subcommand, use:
+
+    docker manifest CMD --help
+
+For full details on using docker manifest lists, see the registry v2 specification.
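+
+For example, a typical workflow looks like this (image names are illustrative):
+
+    docker manifest create myuser/myimage:latest myuser/myimage-amd64:latest myuser/myimage-arm64:latest
+    docker manifest annotate myuser/myimage:latest myuser/myimage-arm64:latest --os linux --arch arm64
+    docker manifest push myuser/myimage:latest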
+ +` diff --git a/cli/command/manifest/create_list.go b/cli/command/manifest/create_list.go new file mode 100644 index 000000000000..2a2f1b1269a6 --- /dev/null +++ b/cli/command/manifest/create_list.go @@ -0,0 +1,80 @@ +package manifest + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" +) + +type annotateOpts struct { + amend bool +} + +func newCreateListCommand(dockerCli *command.DockerCli) *cobra.Command { + + opts := annotateOpts{} + + cmd := &cobra.Command{ + Use: "create newRef manifest [manifest...]", + Short: "Create a local manifest list for annotating and pushing to a registry", + Args: cli.RequiresMinArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return createManifestList(dockerCli, args, opts) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&opts.amend, "amend", "a", false, "Amend an existing manifest list transaction") + return cmd +} + +func createManifestList(dockerCli *command.DockerCli, args []string, opts annotateOpts) error { + + // Just do some basic verification here, and leave the rest for when the user pushes the list + newRef := args[0] + targetRef, err := reference.ParseNormalizedNamed(newRef) + if err != nil { + return fmt.Errorf("Error parsing name for manifest list (%s): %v", newRef, err) + } + _, err = registry.ParseRepositoryInfo(targetRef) + if err != nil { + return fmt.Errorf("Error parsing repository name for manifest list (%s): %v", newRef, err) + } + + // Check locally for this list transaction before proceeding + if _, isDigested := targetRef.(reference.Canonical); !isDigested { + targetRef = reference.TagNameOnly(targetRef) + } + manifestFiles, err := getListFilenames(makeFilesafeName(targetRef.String())) + if err != nil { + return err + } + if len(manifestFiles) > 0 && !opts.amend { + return fmt.Errorf("Refusing to continue over an existing manifest list transaction with no --amend flag") + } + + // Now create the local manifest list transaction by looking up the manifest schemas + // for the constituent images: + manifests := args[1:] + logrus.Info("Retrieving digests of images...") + for _, manifestRef := range manifests { + + mfstData, _, err := getImageData(dockerCli, manifestRef, targetRef.String(), false) + if err != nil { + return err + } + + if len(mfstData) > 1 { + // too many responses--can only happen if a manifest list was returned for the name lookup + return fmt.Errorf("You specified a manifest list entry from a digest that points to a current manifest list. Manifest lists do not allow recursion.") + } + + } + return nil +} diff --git a/cli/command/manifest/ensurehome_linux.go b/cli/command/manifest/ensurehome_linux.go new file mode 100644 index 000000000000..749a7fa72292 --- /dev/null +++ b/cli/command/manifest/ensurehome_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package manifest + +import ( + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" +) + +// ensureHomeIfIAmStatic ensure $HOME to be set if dockerversion.IAmStatic is "true". +// In a static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed +// in the foreseeable future. 
(golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341),
+// so we forcibly set HOME to avoid calling os/user.Current().
+func ensureHomeIfIAmStatic() error {
+	// Note: dockerversion.IAmStatic and homedir.GetStatic() are only available for linux.
+	if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" {
+		home, err := homedir.GetStatic()
+		if err != nil {
+			return err
+		}
+		logrus.Warnf("docker manifest requires HOME to be set for a static client binary. Forcibly setting HOME to %s.", home)
+		os.Setenv("HOME", home)
+	}
+	return nil
+}
diff --git a/cli/command/manifest/ensurehome_other.go b/cli/command/manifest/ensurehome_other.go
new file mode 100644
index 000000000000..2c367bfbd79e
--- /dev/null
+++ b/cli/command/manifest/ensurehome_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package manifest
+
+func ensureHomeIfIAmStatic() error {
+	return nil
+}
diff --git a/cli/command/manifest/fetch.go b/cli/command/manifest/fetch.go
new file mode 100644
index 000000000000..ee3dfb933c0c
--- /dev/null
+++ b/cli/command/manifest/fetch.go
@@ -0,0 +1,342 @@
+package manifest
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/opencontainers/go-digest"
+	"golang.org/x/net/context"
+
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/distribution/manifest/manifestlist"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/distribution"
+	"github.com/docker/docker/dockerversion"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/registry"
+)
+
+type fetchOptions struct {
+	remote string
+}
+
+type manifestFetcher interface {
+	Fetch(ctx context.Context, ref reference.Named) ([]ImgManifestInspect, error)
+}
+
+func loadManifest(manifest string, transaction string) ([]ImgManifestInspect, error) {
+	// Load either a single manifest (if transaction is "", that's fine) or a manifest list.
+	var foundImages []ImgManifestInspect
+	fd, err := getManifestFd(manifest, transaction)
+	if err != nil && err.Error() != "Cannot open directory" {
+		return nil, err
+	}
+	if fd != nil {
+		defer fd.Close()
+		_, err := fd.Stat()
+		if err != nil {
+			return nil, err
+		}
+		mfInspect, err := unmarshalIntoManifestInspect(manifest, transaction)
+		if err != nil {
+			return nil, err
+		}
+		foundImages = append(foundImages, mfInspect)
+	}
+	return foundImages, nil
+}
+
+func loadManifestList(transaction string) (foundImages []ImgManifestInspect, _ error) {
+	manifests, err := getListFilenames(transaction)
+	if err != nil {
+		return nil, err
+	}
+	for _, manifestFile := range manifests {
+		fileParts := strings.Split(manifestFile, string(filepath.Separator))
+		numParts := len(fileParts)
+		mfInspect, err := unmarshalIntoManifestInspect(fileParts[numParts-1], transaction)
+		if err != nil {
+			return nil, err
+		}
+		foundImages = append(foundImages, mfInspect)
+	}
+	return foundImages, nil
+}
+
+func storeManifest(imgInspect ImgManifestInspect, name, transaction string) error {
+	// Store this image manifest so that it can be annotated.
+	// Store the manifests in the user's home to prevent conflicts.
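+	// The on-disk layout used below is (roughly) <manifestBase>/<transaction>/<manifest>,
+	// where both the transaction and manifest names have been passed through makeFilesafeName.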
+ manifestBase, err := buildBaseFilename() + transaction = makeFilesafeName(transaction) + if err != nil { + return err + } + os.MkdirAll(filepath.Join(manifestBase, transaction), 0755) + logrus.Debugf("Storing %s", name) + if err = updateMfFile(imgInspect, name, transaction); err != nil { + fmt.Printf("Error writing local manifest copy: %s\n", err) + return err + } + + return nil +} + +func getImageData(dockerCli *command.DockerCli, name string, transactionID string, fetchOnly bool) ([]ImgManifestInspect, *registry.RepositoryInfo, error) { + + var ( + lastErr error + discardNoSupportErrors bool + foundImages []ImgManifestInspect + confirmedV2 bool + confirmedTLSRegistries = make(map[string]struct{}) + namedRef, transactionNamed reference.Named + err error + normalName string + ) + + if namedRef, err = reference.ParseNormalizedNamed(name); err != nil { + return nil, nil, fmt.Errorf("Error parsing reference for %s: %s\n", name, err) + } + if transactionID != "" { + if transactionNamed, err = reference.ParseNormalizedNamed(transactionID); err != nil { + return nil, nil, fmt.Errorf("Error parsing reference for %s: %s\n", transactionID, err) + } + if _, isDigested := transactionNamed.(reference.Canonical); !isDigested { + transactionNamed = reference.TagNameOnly(transactionNamed) + } + transactionID = makeFilesafeName(transactionNamed.String()) + } + + // Make sure these have a tag, as long as it's not a digest + if _, isDigested := namedRef.(reference.Canonical); !isDigested { + namedRef = reference.TagNameOnly(namedRef) + } + normalName = namedRef.String() + logrus.Debugf("getting image data for ref: %s", normalName) + + // Resolve the Repository name from fqn to RepositoryInfo + // This calls TrimNamed, which removes the tag, so always use namedRef for the image. + repoInfo, err := registry.ParseRepositoryInfo(namedRef) + if err != nil { + return nil, nil, err + } + + // If this is a manifest list, let's check for it locally so a user can see any modifications + // he/she has made. + logrus.Debugf("Checking locally for %s", normalName) + foundImages, err = loadManifest(makeFilesafeName(normalName), transactionID) + if err != nil { + return nil, nil, err + } + if len(foundImages) > 0 { + // Great, no reason to pull from the registry. + return foundImages, repoInfo, nil + } + // Maybe this wasn't a single manifest, in which case there will be no transaction ID + foundImages, err = loadManifestList(normalName) + if err != nil { + return nil, nil, err + } + if len(foundImages) > 0 { + return foundImages, repoInfo, nil + } + + ctx := context.Background() + + authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) + + options := registry.ServiceOptions{} + registryService := registry.NewService(options) + + // a list of registry.APIEndpoint, which could be mirrors, etc., of locally-configured + // repo endpoints. The list will be ordered by priority (v2, https, v1). + endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return nil, nil, err + } + logrus.Debugf("manifest pull: endpoints: %v", endpoints) + + // Try to find the first endpoint that is *both* v2 and using TLS. 
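+	// confirmedV2 and confirmedTLSRegistries are bookkeeping for the loop below: once a
+	// v2 (or TLS-capable) endpoint has been seen for a host, weaker endpoints for that
+	// host are skipped instead of retried.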
+ for _, endpoint := range endpoints { + // make sure I can reach the registry, same as docker pull does + v1endpoint, err := endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(nil), nil) + if err != nil { + return nil, nil, err + } + if _, err := v1endpoint.Ping(); err != nil { + if strings.Contains(err.Error(), "timeout") { + return nil, nil, err + } + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to fetch image manifest of %s repository from %s %s", normalName, endpoint.URL, endpoint.Version) + + fetcher, err := newManifestFetcher(endpoint, repoInfo, authConfig, registryService) + if err != nil { + lastErr = err + continue + } + + if foundImages, err = fetcher.Fetch(ctx, namedRef); err != nil { + // Was this fetch cancelled? If so, don't try to fall back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(distribution.ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // save the current error + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + lastErr = err + } + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return nil, nil, err + } + + if transactionID == "" && len(foundImages) > 1 { + transactionID = normalName + } + // Additionally, we're never storing on inspect, so if we're asked to save images it's for a create, + // and this function will have been called for each image in the create. In that case we'll have an + // image name *and* a transaction ID. IOW, foundImages will be only one image. 
+ if !fetchOnly { + if err := storeManifest(foundImages[0], makeFilesafeName(normalName), transactionID); err != nil { + logrus.Errorf("Error storing manifests: %s\n", err) + } + } + return foundImages, repoInfo, nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s\n", normalName) + } + + return nil, nil, lastErr + +} + +func newManifestFetcher(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, registryService registry.Service) (manifestFetcher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2ManifestFetcher{ + endpoint: endpoint, + authConfig: authConfig, + service: registryService, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1ManifestFetcher{ + endpoint: endpoint, + authConfig: authConfig, + service: registryService, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +func makeImgManifestInspect(name string, img *image.Image, tag string, mfInfo manifestInfo, mediaType string, tagList []string) *ImgManifestInspect { + var digest digest.Digest + if err := mfInfo.digest.Validate(); err == nil { + digest = mfInfo.digest + } + + if mediaType == manifestlist.MediaTypeManifestList { + return &ImgManifestInspect{ + MediaType: mediaType, + Digest: digest, + } + } + + var digests []string + for _, blobDigest := range mfInfo.blobDigests { + digests = append(digests, blobDigest.String()) + } + return &ImgManifestInspect{ + RefName: name, + Size: mfInfo.length, + MediaType: mediaType, + Tag: tag, + Digest: digest, + RepoTags: tagList, + Comment: img.Comment, + Created: img.Created.Format(time.RFC3339Nano), + ContainerConfig: &img.ContainerConfig, + DockerVersion: img.DockerVersion, + Author: img.Author, + Config: img.Config, + Architecture: mfInfo.platform.Architecture, + OS: mfInfo.platform.OS, + OSVersion: mfInfo.platform.OSVersion, + OSFeatures: mfInfo.platform.OSFeatures, + Variant: mfInfo.platform.Variant, + Features: mfInfo.platform.Features, + References: digests, + LayerDigests: mfInfo.layers, + CanonicalJSON: mfInfo.jsonBytes, + } +} + +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case distribution.ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. 
+ return true +} diff --git a/cli/command/manifest/fetch_v1.go b/cli/command/manifest/fetch_v1.go new file mode 100644 index 000000000000..fde5eba58e34 --- /dev/null +++ b/cli/command/manifest/fetch_v1.go @@ -0,0 +1,240 @@ +package manifest + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + dockerdistribution "github.com/docker/docker/distribution" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1ManifestFetcher struct { + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + repo distribution.Repository + confirmedV2 bool + // wrap in a config? + authConfig types.AuthConfig + // Leaving this as a pointer to an interface won't compile for me + service registry.Service + session *registry.Session +} + +func (mf *v1ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) ([]ImgManifestInspect, error) { + // @TODO: Re-test the v1 registry stuff after pulling in all the consolidated reference stuff. + // Pre-condition: ref has to be tagged (e.g. using ParseNormalizedNamed) + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return nil, fallbackError{ + err: dockerdistribution.ErrNoSupport{errors.New("Cannot pull by digest with v1 registry")}, + } + } + tlsConfig, err := mf.service.TLSConfig(mf.repoInfo.Index.Name) + if err != nil { + return nil, err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + registry.NewTransport(tlsConfig), + //registry.DockerHeaders(mf.config.MetaHeaders)..., + registry.DockerHeaders(dockerversion.DockerUserAgent(nil), nil)..., + ) + client := registry.HTTPClient(tr) + //v1Endpoint, err := mf.endpoint.ToV1Endpoint(mf.config.MetaHeaders) + v1Endpoint, err := mf.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(nil), nil) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return nil, fallbackError{err: err} + } + mf.session, err = registry.NewSession(client, &mf.authConfig, v1Endpoint) + if err != nil { + logrus.Debugf("Fallback from error: %s", err) + return nil, fallbackError{err: err} + } + + imgsInspect, err := mf.fetchWithSession(ctx, ref) + if err != nil { + return nil, err + } + if len(imgsInspect) > 1 { + return nil, fmt.Errorf("Found more than one image in V1 fetch!? %v", imgsInspect) + } + imgsInspect[0].MediaType = schema1.MediaTypeManifest + return imgsInspect, nil +} + +func (mf *v1ManifestFetcher) fetchWithSession(ctx context.Context, ref reference.Named) ([]ImgManifestInspect, error) { + // Pre-Condition: ref should always be tagged (e.g. 
using ParseNormalizedNamed)
+	var (
+		imageList = []ImgManifestInspect{}
+		pulledImg *image.Image
+		tagsMap   map[string]string
+	)
+	logrus.Debugf("Fetching v1 manifest for %s", mf.repoInfo.Name)
+	repoData, err := mf.session.GetRepositoryData(mf.repoInfo.Name)
+	if err != nil {
+		if strings.Contains(err.Error(), "HTTP code: 404") {
+			return nil, fmt.Errorf("Error: image %s not found", reference.Path(mf.repoInfo.Name))
+		}
+		// Unexpected HTTP error
+		return nil, err
+	}
+
+	// GetRemoteTags gets all tags & corresponding image IDs for a repo, returned in a map
+	logrus.Debugf("Retrieving the tag list")
+	tagsMap, err = mf.session.GetRemoteTags(repoData.Endpoints, mf.repoInfo.Name)
+	if err != nil {
+		logrus.Errorf("unable to get remote tags: %s", err)
+		return nil, err
+	}
+
+	tagged, isTagged := ref.(reference.NamedTagged)
+	if !isTagged {
+		return nil, fmt.Errorf("internal error: reference %s has no tag", ref.String())
+	}
+	tag := tagged.Tag()
+	tagID, err := mf.session.GetRemoteTag(repoData.Endpoints, mf.repoInfo.Name, tag)
+	if err == registry.ErrRepoNotFound {
+		return nil, fmt.Errorf("Tag %s not found in repository %s", tag, mf.repoInfo.Name.Name())
+	}
+	if err != nil {
+		logrus.Errorf("unable to get remote tag: %s", err)
+		return nil, err
+	}
+	tagsMap[tagged.Tag()] = tagID
+
+	// Pull the tags from the tag/imgID map:
+	tagList := []string{}
+	for tag := range tagsMap {
+		tagList = append(tagList, tag)
+	}
+
+	img := repoData.ImgList[tagID]
+
+	for _, ep := range mf.repoInfo.Index.Mirrors {
+		if pulledImg, err = mf.pullImageJSON(img.ID, ep); err != nil {
+			// Don't report errors when pulling from mirrors.
+			logrus.Debugf("Error pulling image json of %s:%s, mirror: %s, %s", mf.repoInfo.Name.Name(), img.Tag, ep, err)
+			continue
+		}
+		break
+	}
+	if pulledImg == nil {
+		for _, ep := range repoData.Endpoints {
+			if pulledImg, err = mf.pullImageJSON(img.ID, ep); err != nil {
+				// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
+ logrus.Infof("Error pulling image json of %s:%s, endpoint: %s, %v", mf.repoInfo.Name.Name(), img.Tag, ep, err) + continue + } + break + } + } + if err != nil { + return nil, fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, mf.repoInfo.Name.Name(), err) + } + if pulledImg == nil { + return nil, fmt.Errorf("No such image %s:%s", mf.repoInfo.Name.Name(), tag) + } + + imageInsp := makeImgManifestInspect(ref.String(), pulledImg, tag, manifestInfo{}, schema1.MediaTypeManifest, tagList) + imageList = append(imageList, *imageInsp) + return imageList, nil +} + +func (mf *v1ManifestFetcher) pullImageJSON(imgID, endpoint string) (*image.Image, error) { + imgJSON, _, err := mf.session.GetRemoteImageJSON(imgID, endpoint) + if err != nil { + return nil, err + } + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return nil, err + } + configRaw, err := makeRawConfigFromV1Config(imgJSON, image.NewRootFS(), []image.History{h}) + if err != nil { + return nil, err + } + config, err := json.Marshal(configRaw) + if err != nil { + return nil, err + } + img, err := image.NewFromJSON(config) + if err != nil { + return nil, err + } + return img, nil +} + +func makeRawConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) (map[string]*json.RawMessage, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + // the Version pkg was removed, so find a better way to do this + useFallback := false + cPoint := [3]int{1, 8, 3} + for i, dPoint := range strings.Split(dver.DockerVersion, ".") { + if x, _ := strconv.Atoi(dPoint); x < cPoint[i] { + useFallback = true + } + } + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return c, nil +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. 
+func shouldV2Fallback(err errcode.Error) bool {
+	switch err.Code {
+	case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
+		return true
+	}
+	return false
+}
+
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
diff --git a/cli/command/manifest/fetch_v2.go b/cli/command/manifest/fetch_v2.go
new file mode 100644
index 000000000000..b7a6d37b8323
--- /dev/null
+++ b/cli/command/manifest/fetch_v2.go
@@ -0,0 +1,522 @@
+package manifest
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"runtime"
+
+	digest "github.com/opencontainers/go-digest"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/manifest/manifestlist"
+	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/docker/api/types"
+	dockerdistribution "github.com/docker/docker/distribution"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/image/v1"
+	"github.com/docker/docker/registry"
+	"golang.org/x/net/context"
+)
+
+type v2ManifestFetcher struct {
+	endpoint    registry.APIEndpoint
+	repoInfo    *registry.RepositoryInfo
+	repo        distribution.Repository
+	confirmedV2 bool
+	authConfig  types.AuthConfig
+	// Leaving this as a pointer to an interface won't compile for me
+	service registry.Service
+}
+
+type manifestInfo struct {
+	blobDigests []digest.Digest
+	layers      []string
+	digest      digest.Digest
+	platform    manifestlist.PlatformSpec
+	length      int64
+	jsonBytes   []byte
+}
+
+func (mf *v2ManifestFetcher) Fetch(ctx context.Context, ref reference.Named) ([]ImgManifestInspect, error) {
+	// Pre-condition: ref has to be tagged (e.g. using ParseNormalizedNamed)
+	var err error
+
+	mf.repo, mf.confirmedV2, err = dockerdistribution.NewV2Repository(ctx, mf.repoInfo, mf.endpoint, nil, &mf.authConfig, "pull")
+	if err != nil {
+		logrus.Debugf("Error getting v2 registry: %v", err)
+		return nil, err
+	}
+
+	images, err := mf.fetchWithRepository(ctx, ref)
+	if err != nil {
+		if _, ok := err.(fallbackError); ok {
+			return nil, err
+		}
+		if continueOnError(err) {
+			logrus.Errorf("Error trying v2 registry: %v", err)
+			return nil, fallbackError{err: err, confirmedV2: mf.confirmedV2, transportOK: true}
+		}
+	}
+	// Index into the slice so the media type sticks; ranging by value would
+	// only update a copy of each element.
+	for i := range images {
+		images[i].MediaType = schema2.MediaTypeManifest
+	}
+	return images, err
+}
+
+func (mf *v2ManifestFetcher) fetchWithRepository(ctx context.Context, ref reference.Named) ([]ImgManifestInspect, error) {
+	var (
+		manifest    distribution.Manifest
+		tagOrDigest string // Used for logging/progress only
+		tagList     = []string{}
+		imageList   = []ImgManifestInspect{}
+	)
+
+	manSvc, err := mf.repo.Manifests(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		// NOTE: not using TagService.Get, since it uses HEAD requests
+		// against the manifests endpoint, which are not supported by
+		// all registry versions.
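+		// An empty digest argument combined with distribution.WithTag resolves the
+		// manifest by tag using a GET request.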
+ manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return nil, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } else if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return nil, err + } + tagOrDigest = digested.Digest().String() + } else { + return nil, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) + } + + if manifest == nil { + return nil, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + mf.confirmedV2 = true + + tagList, err = mf.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return nil, allowV1Fallback(err) + } + + var ( + images []*image.Image + mfInfos []manifestInfo + mediaType []string + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + image, mfInfo, err := mf.pullSchema1(ctx, ref, v) + images = append(images, image) + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema1.MediaTypeManifest) + if err != nil { + return nil, err + } + case *schema2.DeserializedManifest: + image, mfInfo, err := mf.pullSchema2(ctx, ref, v) + images = append(images, image) + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema2.MediaTypeManifest) + if err != nil { + return nil, err + } + case *manifestlist.DeserializedManifestList: + images, mfInfos, mediaType, err = mf.pullManifestList(ctx, ref, v) + if err != nil { + return nil, err + } + default: + return nil, errors.New("unsupported manifest format") + } + + for idx, img := range images { + imgReturn := makeImgManifestInspect(ref.String(), img, tagOrDigest, mfInfos[idx], mediaType[idx], tagList) + imageList = append(imageList, *imgReturn) + } + return imageList, nil +} + +func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, mfInfo manifestInfo, err error) { + mfInfo = manifestInfo{} + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return nil, mfInfo, err + } + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return nil, mfInfo, err + } + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
+	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
+		var throwAway struct {
+			ThrowAway bool `json:"throwaway,omitempty"`
+		}
+		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
+			return nil, mfInfo, err
+		}
+
+		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
+		if err != nil {
+			return nil, mfInfo, err
+		}
+		history = append(history, h)
+		mfInfo.blobDigests = append(mfInfo.blobDigests, verifiedManifest.FSLayers[i].BlobSum)
+	}
+
+	rootFS := image.NewRootFS()
+	configRaw, err := makeRawConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
+	if err != nil {
+		return nil, mfInfo, err
+	}
+
+	config, err := json.Marshal(configRaw)
+	if err != nil {
+		return nil, mfInfo, err
+	}
+
+	img, err = image.NewFromJSON(config)
+	if err != nil {
+		return nil, mfInfo, err
+	}
+
+	mfInfo.digest = digest.FromBytes(unverifiedManifest.Canonical)
+	// add the size of the manifest to the info struct; needed for assembling proper
+	// manifest lists
+	mfInfo.length = int64(len(unverifiedManifest.Canonical))
+	mfInfo.jsonBytes, err = unverifiedManifest.MarshalJSON()
+	if err != nil {
+		return nil, mfInfo, err
+	}
+	return img, mfInfo, nil
+}
+
+func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
+	// If pull by digest, then verify the manifest digest. NOTE: It is
+	// important to do this first, before any other content validation. If the
+	// digest cannot be verified, don't even bother with those other things.
+	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
+		verifier := digested.Digest().Verifier()
+		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
+			return nil, err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return nil, err
+		}
+	}
+	m = &signedManifest.Manifest
+
+	if m.SchemaVersion != 1 {
+		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
+	}
+	if len(m.FSLayers) != len(m.History) {
+		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
+	}
+	if len(m.FSLayers) == 0 {
+		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
+	}
+	return m, nil
+}
+
+func fixManifestLayers(m *schema1.Manifest) error {
+	imgs := make([]*image.V1Image, len(m.FSLayers))
+	for i := range m.FSLayers {
+		img := &image.V1Image{}
+
+		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
+			return err
+		}
+
+		imgs[i] = img
+		if err := v1.ValidateID(img.ID); err != nil {
+			return err
+		}
+	}
+
+	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
+		// Windows base layer can point to a base layer parent that is not in manifest.
+ return errors.New("Invalid parent ID in the base layer of the image.") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +// pullSchema2 pulls an image using a schema2 manifest +func (mf *v2ManifestFetcher) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (img *image.Image, mfInfo manifestInfo, err error) { + mfInfo.digest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return nil, mfInfo, err + } + target := mfst.Target() + + configChan := make(chan []byte, 1) + errChan := make(chan error, 1) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + + // Pull the image config + go func() { + configJSON, err := mf.pullSchema2ImageConfig(ctx, target.Digest) + if err != nil { + errChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + unmarshalledConfig image.Image // deserialized image config + ) + if runtime.GOOS == "windows" { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return nil, mfInfo, err + } + if unmarshalledConfig.RootFS == nil { + return nil, mfInfo, errors.New("image config has no rootfs section") + } + } + + if configJSON == nil { + configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan) + if err != nil { + return nil, mfInfo, err + } + } + + for _, descriptor := range mfst.References() { + mfInfo.blobDigests = append(mfInfo.blobDigests, descriptor.Digest) + } + for _, layer := range mfst.Layers { + mfInfo.layers = append(mfInfo.layers, layer.Digest.String()) + } + + img, err = image.NewFromJSON(configJSON) + if err != nil { + return nil, mfInfo, err + } + // add the size of the manifest to the image response; needed for assembling proper + // manifest lists + _, mfBytes, err := mfst.Payload() + if err != nil { + return nil, mfInfo, err + } + mfInfo.length = int64(len(mfBytes)) + mfInfo.jsonBytes = mfBytes + mfInfo.platform = manifestlist.PlatformSpec{ + OS: img.OS, + Architecture: img.Architecture, + OSVersion: img.OSVersion, + OSFeatures: img.OSFeatures, + } + return img, mfInfo, nil +} + +func (mf *v2ManifestFetcher) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { + blobs := mf.repo.Blobs(ctx) + configJSON, err = blobs.Get(ctx, dgst) + if err != nil { + return nil, err + } + + // Verify image config digest + verifier := dgst.Verifier() + if err != nil { + return nil, err + } + if _, err := verifier.Write(configJSON); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image config verification failed for digest %s", dgst) + logrus.Error(err) + return 
nil, err
+	}
+
+	return configJSON, nil
+}
+
+func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+	select {
+	case configJSON := <-configChan:
+		var unmarshalledConfig image.Image
+		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+			return nil, image.Image{}, err
+		}
+		return configJSON, unmarshalledConfig, nil
+	case err := <-errChan:
+		return nil, image.Image{}, err
+		// Don't need a case for ctx.Done in the select because cancellation
+		// will trigger an error in mf.pullSchema2ImageConfig.
+	}
+}
+
+// ImageConfigPullError is an error pulling the image config blob
+// (only applies to schema2).
+type ImageConfigPullError struct {
+	Err error
+}
+
+// Error returns the error string for ImageConfigPullError.
+func (e ImageConfigPullError) Error() string {
+	return "error pulling image configuration: " + e.Err.Error()
+}
+
+// allowV1Fallback checks if the error is a possible reason to fallback to v1
+// (even if confirmedV2 has been set already), and if so, wraps the error in
+// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
+// error unmodified.
+func allowV1Fallback(err error) error {
+	switch v := err.(type) {
+	case errcode.Errors:
+		if len(v) != 0 {
+			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
+				return fallbackError{err: err, confirmedV2: false, transportOK: true}
+			}
+		}
+	case errcode.Error:
+		if shouldV2Fallback(v) {
+			return fallbackError{err: err, confirmedV2: false, transportOK: true}
+		}
+	}
+	return err
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier := digested.Digest().Verifier()
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
+}
+
+// pullManifestList handles "manifest lists" which point to various
+// platform-specific manifests.
+func (mf *v2ManifestFetcher) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) ([]*image.Image, []manifestInfo, []string, error) { + var ( + imageList = []*image.Image{} + mfInfos = []manifestInfo{} + mediaType = []string{} + ) + manifestListDigest, err := schema2ManifestDigest(ref, mfstList) + if err != nil { + return nil, nil, nil, err + } + logrus.Debugf("Pulling manifest list entries for ML digest %v", manifestListDigest) + + for _, manifestDescriptor := range mfstList.Manifests { + manSvc, err := mf.repo.Manifests(ctx) + if err != nil { + return nil, nil, nil, err + } + + thisDigest := manifestDescriptor.Digest + thisPlatform := manifestDescriptor.Platform + manifest, err := manSvc.Get(ctx, thisDigest) + if err != nil { + return nil, nil, nil, err + } + + manifestRef, err := reference.WithDigest(ref, thisDigest) + if err != nil { + return nil, nil, nil, err + } + + switch v := manifest.(type) { + case *schema1.SignedManifest: + img, mfInfo, err := mf.pullSchema1(ctx, manifestRef, v) + imageList = append(imageList, img) + mfInfo.platform = thisPlatform + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema1.MediaTypeManifest) + if err != nil { + return nil, nil, nil, err + } + case *schema2.DeserializedManifest: + img, mfInfo, err := mf.pullSchema2(ctx, manifestRef, v) + imageList = append(imageList, img) + mfInfo.platform = thisPlatform + mfInfos = append(mfInfos, mfInfo) + mediaType = append(mediaType, schema2.MediaTypeManifest) + if err != nil { + return nil, nil, nil, err + } + default: + return nil, nil, nil, errors.New("unsupported manifest format") + } + } + + return imageList, mfInfos, mediaType, err +} diff --git a/cli/command/manifest/inspect.go b/cli/command/manifest/inspect.go new file mode 100644 index 000000000000..e5eda14ad3cb --- /dev/null +++ b/cli/command/manifest/inspect.go @@ -0,0 +1,168 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/docker/registry" +) + +type inspectOptions struct { + remote string + verbose bool +} + +// NewInspectCommand creates a new `docker manifest inspect` command +func newInspectCommand(dockerCli *command.DockerCli) *cobra.Command { + var opts inspectOptions + + cmd := &cobra.Command{ + Use: "inspect [OPTIONS] NAME[:TAG]", + Short: "Display an image's manifest, or a remote manifest list.", + Args: cli.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + opts.remote = args[0] + return runListInspect(dockerCli, opts) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&opts.verbose, "verbose", "v", false, "Output additional info including layers and platform") + + return cmd +} + +func runListInspect(dockerCli *command.DockerCli, opts inspectOptions) error { + + // Get the data and then format it + var ( + imgInspect []ImgManifestInspect + prettyJSON bytes.Buffer + ) + + named, err := reference.ParseNormalizedNamed(opts.remote) + if err != nil { + return err + } + targetRepo, err := registry.ParseRepositoryInfo(named) + if err != nil { + return err + } + + // For now, always pull as there' no reason to store an inspect. They're quick to get. 
+ // When the engine is multi-arch image aware, we can store these in a universal location to + // save a little bandwidth. + imgInspect, _, err = getImageData(dockerCli, named.String(), "", true) + if err != nil { + logrus.Fatal(err) + } + // output basic informative details about the image + if len(imgInspect) == 1 { + // this is a basic single manifest + err = json.Indent(&prettyJSON, imgInspect[0].CanonicalJSON, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + if !opts.verbose { + return nil + } + mfd, _, err := buildManifestObj(targetRepo, imgInspect[0]) + if err != nil { + return err + } + jsonBytes, err := json.Marshal(mfd) + if err != nil { + return err + } + prettyJSON.Reset() + err = json.Indent(&prettyJSON, jsonBytes, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + return nil + } + + manifests := []manifestlist.ManifestDescriptor{} + // More than one response. This is a manifest list. + for _, img := range imgInspect { + mfd, _, err := buildManifestObj(targetRepo, img) + if err != nil { + return fmt.Errorf("Error assembling ManifestDescriptor") + } + manifests = append(manifests, mfd) + } + deserializedML, err := manifestlist.FromDescriptors(manifests) + if err != nil { + return err + } + jsonBytes, err := deserializedML.MarshalJSON() + if err != nil { + return err + } + err = json.Indent(&prettyJSON, jsonBytes, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + if !opts.verbose { + return nil + } + for _, img := range imgInspect { + switch img.MediaType { + case schema1.MediaTypeManifest: + var manifestv1 schema1.Manifest + err := json.Unmarshal(img.CanonicalJSON, &manifestv1) + if err != nil { + return err + } + jsonBytes, err := json.Marshal(manifestv1) + if err != nil { + return err + } + prettyJSON.Reset() + err = json.Indent(&prettyJSON, jsonBytes, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + case schema2.MediaTypeManifest: + var manifestv2 schema2.Manifest + err := json.Unmarshal(img.CanonicalJSON, &manifestv2) + if err != nil { + return err + } + jsonBytes, err := json.Marshal(manifestv2) + if err != nil { + return err + } + prettyJSON.Reset() + err = json.Indent(&prettyJSON, jsonBytes, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + } + /* + prettyJSON.Reset() + err = json.Indent(&prettyJSON, jsonBytes, "", " ") + if err != nil { + return err + } + fmt.Fprintf(dockerCli.Out(), "%s\n", prettyJSON.String()) + */ + } + return nil +} diff --git a/cli/command/manifest/push.go b/cli/command/manifest/push.go new file mode 100644 index 000000000000..2fa25de7c83e --- /dev/null +++ b/cli/command/manifest/push.go @@ -0,0 +1,591 @@ +package manifest + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/spf13/cobra" + "golang.org/x/net/context" + "gopkg.in/yaml.v2" + + digest "github.com/opencontainers/go-digest" + + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + + "github.com/docker/cli/cli" + "github.com/docker/cli/cli/command" + "github.com/docker/cli/cli/config/configfile" + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/registry" +) + +type pushOpts struct { + newRef string + file string + purge bool +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +type dumbCredentialStore struct { + auth *types.AuthConfig +} + +// YamlInput represents the YAML format input to the pushml +// command. +type YamlInput struct { + Image string + Manifests []YamlManifestEntry +} + +// YamlManifestEntry represents an entry in the list of manifests to +// be combined into a manifest list, provided via the YAML input +type YamlManifestEntry struct { + Image string + Platform manifestlist.PlatformSpec +} + +// we will store up a list of blobs we must ask the registry +// to cross-mount into our target namespace +type blobMount struct { + FromRepo string + Digest string +} + +// if we have mounted blobs referenced from manifests from +// outside the target repository namespace we will need to +// push them to our target's repo as they will be references +// from the final manifest list object we push +type manifestPush struct { + Name string + Digest string + JSONBytes []byte + MediaType string +} + +func newPushListCommand(dockerCli *command.DockerCli) *cobra.Command { + + opts := pushOpts{} + + cmd := &cobra.Command{ + Use: "push [newRef | --file pre-annotated-yaml]", + Short: "Push a manifest list for an image to a repository", + Args: cli.RequiresMinArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return putManifestList(dockerCli, opts, args) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&opts.file, "file", "f", "", "Path to a file containing a manifest list and its annotated constituent manifests") + flags.BoolVarP(&opts.purge, "purge", "p", true, "After pushing, delete the user's locally-stored manifest list info") + + return cmd +} + +func putManifestList(dockerCli *command.DockerCli, opts pushOpts, args []string) error { + var ( + manifests []string + manifestList manifestlist.ManifestList + targetRef reference.Named + blobMountRequests []blobMount + manifestRequests []manifestPush + err error + yamlInput YamlInput + targetFilename string + ) + + // First get all the info we'll need from either a yaml file, or a user's locally-creatd manifest transation. 
+ numArgs := len(args) + if numArgs > 1 { + return fmt.Errorf("More than one argument provided to 'manifest push'") + } + if (numArgs == 0) && (opts.file == "") { + return fmt.Errorf("Please push using a yaml file or a list created using 'manifest create.'") + } + if opts.file != "" { + yamlInput, err = getYamlInput(dockerCli, opts.file) + if err != nil { + return fmt.Errorf("Error retrieving manifests from YAML file: %s", err) + } + targetRef, err = reference.ParseNormalizedNamed(yamlInput.Image) + if err != nil { + return fmt.Errorf("Error parsing name for manifest list (%s): %v", yamlInput.Image, err) + } + if _, isDigested := targetRef.(reference.Canonical); !isDigested { + targetRef = reference.TagNameOnly(targetRef) + } + } else { + targetRef, err = reference.ParseNormalizedNamed(args[0]) + if err != nil { + return fmt.Errorf("Error parsing name for manifest list (%s): %v", args[0], err) + } + if _, isDigested := targetRef.(reference.Canonical); !isDigested { + targetRef = reference.TagNameOnly(targetRef) + } + manifests, err = getListFilenames(targetRef.String()) + if err != nil { + return err + } + if len(manifests) == 0 { + return fmt.Errorf("%s not found", targetRef.String()) + } + // Set this now, for purging the dir later, because we alter it below to use for the pushURL + targetFilename, _ = mfToFilename(targetRef.String(), "") + } + targetRepo, err := registry.ParseRepositoryInfo(targetRef) + if err != nil { + return fmt.Errorf("Error parsing repository name for manifest list (%s): %v", opts.newRef, err) + } + targetEndpoint, targetRepoName, err := setupRepo(targetRepo) + if err != nil { + return fmt.Errorf("Error setting up repository endpoint and references for %q: %v", targetRef, err) + } + + // Now that targetRepo is set, jump through a lot of hoops to get a Named reference without + // the domain included (targetRef), and one without the tag (bareRef) + tagIndex := strings.LastIndex(targetRef.String(), ":") + if tagIndex < 0 { + targetRef = reference.TagNameOnly(targetRef) + tagIndex = strings.IndexRune(targetRef.String(), ':') + } + tag := targetRef.String()[tagIndex+1:] + bareRef, err := reference.WithName(reference.Path(targetRef)) + if err != nil { + return err + } + targetRef, _ = reference.WithTag(bareRef, tag) + + logrus.Debugf("Creating target ref: %s", targetRef.String()) + + ctx := context.Background() + + // Now create the manifest list payload by looking up the manifest schemas + // for the constituent images: + logrus.Info("Retrieving digests of images...") + if opts.file == "" { + // manifests is a list of file paths + for _, manifestFile := range manifests { + fileParts := strings.Split(manifestFile, string(filepath.Separator)) + numParts := len(fileParts) + mfstInspect, err := unmarshalIntoManifestInspect(fileParts[numParts-1], fileParts[numParts-2]) + if err != nil { + return err + } + if mfstInspect.Architecture == "" || mfstInspect.OS == "" { + return fmt.Errorf("Malformed manifest object. 
Cannot push to registry.") + } + manifest, repoInfo, err := buildManifestObj(targetRepo, mfstInspect) + if err != nil { + return err + } + manifestList.Manifests = append(manifestList.Manifests, manifest) + + // if this image is in a different repo, we need to add the layer/blob digests to the list of + // requested blob mounts (cross-repository push) before pushing the manifest list + manifestRepoName := reference.Path(repoInfo.Name) + if targetRepoName != manifestRepoName { + bmr, mr := buildBlobMountRequestLists(mfstInspect, targetRepoName, manifestRepoName) + blobMountRequests = append(blobMountRequests, bmr...) + manifestRequests = append(manifestRequests, mr...) + } + } + // @TODO: Pull the dup parts out from these two if/else blocks. Make a list of Manifest objects and run through that + // doing the dup parts. + } + + // Set the schema version + manifestList.Versioned = manifestlist.SchemaVersion + + urlBuilder, err := v2.NewURLBuilderFromString(targetEndpoint.URL.String(), false) + logrus.Infof("manifest: put: target endpoint url: %s", targetEndpoint.URL.String()) + if err != nil { + return fmt.Errorf("Can't create URL builder from endpoint (%s): %v", targetEndpoint.URL.String(), err) + } + pushURL, err := createManifestURLFromRef(targetRef, urlBuilder) + if err != nil { + return fmt.Errorf("Error setting up repository endpoint and references for %q: %v", targetRef, err) + } + logrus.Debugf("Manifest list push url: %s", pushURL) + + deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) + if err != nil { + return fmt.Errorf("Cannot deserialize manifest list: %v", err) + } + mediaType, p, err := deserializedManifestList.Payload() + logrus.Debugf("mediaType of manifestList: %s", mediaType) + if err != nil { + return fmt.Errorf("Cannot retrieve payload for HTTP PUT of manifest list: %v", err) + + } + putRequest, err := http.NewRequest("PUT", pushURL, bytes.NewReader(p)) + if err != nil { + return fmt.Errorf("HTTP PUT request creation failed: %v", err) + } + putRequest.Header.Set("Content-Type", mediaType) + + httpClient, err := getHTTPClient(ctx, dockerCli, targetRepo, targetEndpoint, targetRepoName) + if err != nil { + return fmt.Errorf("Failed to setup HTTP client to repository: %v", err) + } + + // before we push the manifest list, if we have any blob mount requests, we need + // to ask the registry to mount those blobs in our target so they are available + // as references + if err := mountBlobs(httpClient, urlBuilder, targetRef, blobMountRequests); err != nil { + return fmt.Errorf("Couldn't mount blobs for cross-repository push: %v", err) + } + + // we also must push any manifests that are referenced in the manifest list into + // the target namespace + // Use the untagged target for this so the digest is used + if err := pushReferences(httpClient, urlBuilder, bareRef, manifestRequests); err != nil { + return fmt.Errorf("Couldn't push manifests referenced in our manifest list: %v", err) + } + + resp, err := httpClient.Do(putRequest) + if err != nil { + return fmt.Errorf("V2 registry PUT of manifest list failed: %v", err) + } + defer resp.Body.Close() + + if statusSuccess(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return err + } + if opts.purge == true { + logrus.Debugf("Deleting files at %s", targetFilename) + if err := os.RemoveAll(targetFilename); err != nil { + // Not a fatal error + logrus.Info("Unable to clean up manifest files in %s", targetFilename) 
+			}
+		}
+		logrus.Infof("Successfully pushed manifest list %s with digest %s", targetRef, dgst)
+		return nil
+	}
+	return fmt.Errorf("Registry push unsuccessful: response %d: %s", resp.StatusCode, resp.Status)
+}
+
+func getYamlInput(dockerCli *command.DockerCli, yamlFile string) (YamlInput, error) {
+	logrus.Debugf("YAML file: %s", yamlFile)
+
+	if _, err := os.Stat(yamlFile); err != nil {
+		logrus.Debugf("Unable to open file: %s", yamlFile)
+	}
+
+	var yamlInput YamlInput
+	yamlBuf, err := ioutil.ReadFile(yamlFile)
+	if err != nil {
+		return YamlInput{}, fmt.Errorf("Can't read YAML file %q: %v", yamlFile, err)
+	}
+	if err := yaml.Unmarshal(yamlBuf, &yamlInput); err != nil {
+		return YamlInput{}, fmt.Errorf("Can't unmarshal YAML file %q: %v", yamlFile, err)
+	}
+	return yamlInput, nil
+}
+
+func buildManifestObj(targetRepo *registry.RepositoryInfo, mfInspect ImgManifestInspect) (manifestlist.ManifestDescriptor, *registry.RepositoryInfo, error) {
+	manifestRef, err := reference.ParseNormalizedNamed(mfInspect.RefName)
+	if err != nil {
+		return manifestlist.ManifestDescriptor{}, nil, err
+	}
+	repoInfo, err := registry.ParseRepositoryInfo(manifestRef)
+	if err != nil {
+		return manifestlist.ManifestDescriptor{}, nil, err
+	}
+
+	manifestRepoHostname := reference.Domain(repoInfo.Name)
+	targetRepoHostname := reference.Domain(targetRepo.Name)
+	if manifestRepoHostname != targetRepoHostname {
+		return manifestlist.ManifestDescriptor{}, nil, fmt.Errorf("Cannot use source images from a different registry than the target image: %s != %s", manifestRepoHostname, targetRepoHostname)
+	}
+
+	manifest := manifestlist.ManifestDescriptor{
+		Platform: manifestlist.PlatformSpec{
+			Architecture: mfInspect.Architecture,
+			OS:           mfInspect.OS,
+			OSVersion:    mfInspect.OSVersion,
+			OSFeatures:   mfInspect.OSFeatures,
+			Variant:      mfInspect.Variant,
+			Features:     mfInspect.Features,
+		},
+	}
+	manifest.Descriptor.Digest = mfInspect.Digest
+	manifest.Size = mfInspect.Size
+	manifest.MediaType = mfInspect.MediaType
+
+	err = manifest.Descriptor.Digest.Validate()
+	if err != nil {
+		return manifestlist.ManifestDescriptor{}, nil, fmt.Errorf("Digest parse of image %q failed with error: %v", manifestRef, err)
+	}
+
+	return manifest, repoInfo, nil
+}
+
+func buildBlobMountRequestLists(mfstInspect ImgManifestInspect, targetRepoName, mfRepoName string) ([]blobMount, []manifestPush) {
+	var (
+		blobMountRequests []blobMount
+		manifestRequests  []manifestPush
+	)
+
+	logrus.Debugf("Adding manifest references of %q to blob mount requests to %s", mfRepoName, targetRepoName)
+	for _, layer := range mfstInspect.References {
+		blobMountRequests = append(blobMountRequests, blobMount{FromRepo: mfRepoName, Digest: layer})
+	}
+	// The manifest itself must also be pushed into the target namespace.
+	logrus.Debugf("Adding manifest %q to be pushed to %q as a manifest reference", mfRepoName, targetRepoName)
+	manifestRequests = append(manifestRequests, manifestPush{
+		Name:      mfRepoName,
+		Digest:    mfstInspect.Digest.String(),
+		JSONBytes: mfstInspect.CanonicalJSON,
+		MediaType: mfstInspect.MediaType,
+	})
+
+	return blobMountRequests, manifestRequests
+}
+
+func getHTTPClient(ctx context.Context, dockerCli *command.DockerCli, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, repoName string) (*http.Client, error) {
+	// Get the HTTP transport; it is used below in a client to upload the manifest.
+	// TODO: pull the client construction out into a separate function.
+	base := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     endpoint.TLSConfig,
+		DisableKeepAlives:   true,
+	}
+
+	authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
+	modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(nil), http.Header{})
+	authTransport := transport.NewTransport(base, modifiers...)
+	challengeManager, _, err := registry.PingV2Registry(endpoint.URL, authTransport)
+	if err != nil {
+		return nil, fmt.Errorf("Ping of V2 registry failed: %v", err)
+	}
+	if authConfig.RegistryToken != "" {
+		passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
+		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
+	} else {
+		creds := registry.NewStaticCredentialStore(&authConfig)
+		tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, "*")
+		basicHandler := auth.NewBasicHandler(creds)
+		modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	}
+	tr := transport.NewTransport(base, modifiers...)
+
+	httpClient := &http.Client{
+		Transport: tr,
+		// @TODO: Use the default (leave CheckRedirect nil), or write a generic one
+		// and put it somewhere? (There's one in docker/distribution/registry/client/repository.go)
+		// CheckRedirect: checkHTTPRedirect,
+	}
+	return httpClient, nil
+}
+
+func createManifestURLFromRef(targetRef reference.Named, urlBuilder *v2.URLBuilder) (string, error) {
+	manifestURL, err := urlBuilder.BuildManifestURL(targetRef)
+	if err != nil {
+		return "", fmt.Errorf("Failed to build manifest URL from target reference: %v", err)
+	}
+	return manifestURL, nil
+}
+
+func setupRepo(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, string, error) {
+	endpoint, err := selectPushEndpoint(repoInfo)
+	if err != nil {
+		return endpoint, "", err
+	}
+	logrus.Debugf("manifest: create: endpoint: %v", endpoint)
+	repoName := repoInfo.Name.Name()
+	// If the endpoint does not support the canonical name, use the remote name instead.
+	if endpoint.TrimHostname {
+		repoName = reference.Path(repoInfo.Name)
+		logrus.Debugf("repoName: %v", repoName)
+	}
+	return endpoint, repoName, nil
+}
+
+func selectPushEndpoint(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, error) {
+	var err error
+
+	options := registry.ServiceOptions{}
+	// By default the registry service treats loopback addresses (IPv4 at least) as insecure registries.
+	options.InsecureRegistries, err = loadLocalInsecureRegistries()
+	if err != nil {
+		return registry.APIEndpoint{}, err
+	}
+	registryService := registry.NewService(options)
+	endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
+	if err != nil {
+		return registry.APIEndpoint{}, err
+	}
+	logrus.Debugf("manifest: potential push endpoints: %v", endpoints)
+	// Default to returning the highest-priority endpoint.
+	endpoint := endpoints[0]
+	if !repoInfo.Index.Secure {
+		for _, ep := range endpoints {
+			if ep.URL.Scheme == "http" {
+				endpoint = ep
+			}
+		}
+	}
+	return endpoint, nil
+}
+
+func loadLocalInsecureRegistries() ([]string, error) {
+	insecureRegistries := []string{}
+	// Check $HOME/.docker/config.json. There may be mismatches between what the user has in their
+	// local config and what the daemon they're talking to allows, but we can be okay with that.
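+	// For illustration, a config.json carrying this field might look like this
+	// (hypothetical values):
+	//
+	//	{ "insecure-registries": ["myregistry.example.com:5000", "10.0.0.0/24"] }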
+	userHome, err := homedir.GetStatic()
+	if err != nil {
+		return []string{}, fmt.Errorf("Manifest create: lookup local insecure registries: Unable to retrieve $HOME")
+	}
+
+	jsonData, err := ioutil.ReadFile(fmt.Sprintf("%s/.docker/config.json", userHome))
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return []string{}, fmt.Errorf("Manifest create: Unable to read $HOME/.docker/config.json: %s", err)
+		}
+		// If the file just doesn't exist, no insecure registries were specified.
+		logrus.Debug("Manifest: No insecure registries were specified via $HOME/.docker/config.json")
+		return []string{}, nil
+	}
+
+	if jsonData != nil {
+		cf := configfile.ConfigFile{}
+		if err := json.Unmarshal(jsonData, &cf); err != nil {
+			logrus.Debugf("Manifest create: Unable to unmarshal insecure registries from $HOME/.docker/config.json: %s", err)
+			return []string{}, nil
+		}
+		if cf.InsecureRegistries == nil {
+			return []string{}, nil
+		}
+		// @TODO: Add tests for a) specifying in config.json, b) invalid entries
+		for _, reg := range cf.InsecureRegistries {
+			// net.ParseIP returns nil (not an error) when the string is not an IP address.
+			if ip := net.ParseIP(reg); ip != nil {
+				insecureRegistries = append(insecureRegistries, reg)
+			} else if _, _, err := net.ParseCIDR(reg); err == nil {
+				insecureRegistries = append(insecureRegistries, reg)
+			} else if ips, err := net.LookupHost(reg); err == nil {
+				insecureRegistries = append(insecureRegistries, ips...)
+			} else {
+				return []string{}, fmt.Errorf("Manifest create: Invalid registry (%s) specified in ~/.docker/config.json: %s", reg, err)
+			}
+		}
+	}
+
+	return insecureRegistries, nil
+}
+
+// pushReferences pushes each referenced manifest into the target repository
+// by digest, so that the manifest list's references resolve there.
+func pushReferences(httpClient *http.Client, urlBuilder *v2.URLBuilder, ref reference.Named, manifests []manifestPush) error {
+	for _, manifest := range manifests {
+		dgst, err := digest.Parse(manifest.Digest)
+		if err != nil {
+			return fmt.Errorf("Error parsing manifest digest (%s) for referenced manifest %q: %v", manifest.Digest, manifest.Name, err)
+		}
+		logrus.Debugf("pushing ref digest %s", dgst)
+		targetRef, err := reference.WithDigest(ref, dgst)
+		if err != nil {
+			return fmt.Errorf("Error creating manifest digest target for referenced manifest %q: %v", manifest.Name, err)
+		}
+		logrus.Debugf("pushing ref %v", targetRef)
+		pushURL, err := urlBuilder.BuildManifestURL(targetRef)
+		if err != nil {
+			return fmt.Errorf("Error setting up manifest push URL for manifest references for %q: %v", manifest.Name, err)
+		}
+		logrus.Debugf("manifest reference push URL: %s", pushURL)
+
+		pushRequest, err := http.NewRequest("PUT", pushURL, bytes.NewReader(manifest.JSONBytes))
+		if err != nil {
+			return fmt.Errorf("HTTP PUT request creation for manifest reference push failed: %v", err)
+		}
+		pushRequest.Header.Set("Content-Type", manifest.MediaType)
+		resp, err := httpClient.Do(pushRequest)
+		if err != nil {
+			return fmt.Errorf("PUT of manifest reference failed: %v", err)
+		}
+
+		resp.Body.Close()
+		if !statusSuccess(resp.StatusCode) {
+			return fmt.Errorf("Referenced manifest push unsuccessful: response %d: %s", resp.StatusCode, resp.Status)
+		}
+		dgstHeader := resp.Header.Get("Docker-Content-Digest")
+		dgstResult, err := digest.Parse(dgstHeader)
+		if err != nil {
+			return fmt.Errorf("Couldn't parse pushed manifest digest response: %v", err)
+		}
+		if string(dgstResult) != manifest.Digest {
+			return fmt.Errorf("Pushed referenced manifest received a different digest: expected %s, got %s", manifest.Digest, string(dgstResult))
+		}
+		logrus.Debugf("referenced manifest %q pushed; digest matches: %s", manifest.Name, string(dgst))
+	}
+	return nil
+}
+
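+// mountBlobs asks the registry to cross-mount each requested blob into the
+// target repository. Per the registry v2 API this is a
+// POST to /v2/<target>/blobs/uploads/?mount=<digest>&from=<source-repo>,
+// answered with "201 Created" when the mount succeeds.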
+func mountBlobs(httpClient *http.Client, urlBuilder *v2.URLBuilder, ref reference.Named, blobsRequested []blobMount) error {
+	for _, blob := range blobsRequested {
+		// Build the blob mount URL (named mountURL so it does not shadow the net/url package).
+		mountURL, err := urlBuilder.BuildBlobUploadURL(ref, url.Values{"from": {blob.FromRepo}, "mount": {blob.Digest}})
+		if err != nil {
+			return fmt.Errorf("Failed to create blob mount URL: %v", err)
+		}
+		mountRequest, err := http.NewRequest("POST", mountURL, nil)
+		if err != nil {
+			return fmt.Errorf("HTTP POST request creation for blob mount failed: %v", err)
+		}
+		mountRequest.Header.Set("Content-Length", "0")
+		resp, err := httpClient.Do(mountRequest)
+		if err != nil {
+			return fmt.Errorf("V2 registry POST of blob mount failed: %v", err)
+		}
+
+		resp.Body.Close()
+		if resp.StatusCode != http.StatusCreated {
+			return fmt.Errorf("Blob mount failed to url %s: HTTP status %d", mountURL, resp.StatusCode)
+		}
+		logrus.Debugf("Mount of blob %s succeeded, location: %q", blob.Digest, resp.Header.Get("Location"))
+	}
+	return nil
+}
+
+// statusSuccess treats 2xx and 3xx responses as success.
+func statusSuccess(status int) bool {
+	return status >= 200 && status <= 399
+}
diff --git a/cli/command/manifest/types.go b/cli/command/manifest/types.go
new file mode 100644
index 000000000000..c096d6f360a3
--- /dev/null
+++ b/cli/command/manifest/types.go
@@ -0,0 +1,51 @@
+package manifest
+
+import (
+	containerTypes "github.com/docker/docker/api/types/container"
+	"github.com/opencontainers/go-digest"
+)
+
+// fallbackError wraps an error that can possibly allow fallback to a different
+// endpoint.
+type fallbackError struct {
+	// err is the error being wrapped.
+	err error
+	// confirmedV2 is set to true if it was confirmed that the registry
+	// supports the v2 protocol. This is used to limit fallbacks to the v1
+	// protocol.
+	confirmedV2 bool
+	// transportOK is set to true if we managed to speak HTTP with the
+	// registry.
+	transportOK bool
+}
+
+// Error renders the fallbackError as a string.
+func (f fallbackError) Error() string {
+	return f.err.Error()
+}
+
+// ImgManifestInspect contains info to output for a manifest object.
+type ImgManifestInspect struct {
+	RefName         string                 `json:"ref"`
+	Size            int64                  `json:"size"`
+	MediaType       string                 `json:"media_type"`
+	Tag             string                 `json:"tag"`
+	Digest          digest.Digest          `json:"digest"`
+	RepoTags        []string               `json:"repotags"`
+	Comment         string                 `json:"comment"`
+	Created         string                 `json:"created"`
+	ContainerConfig *containerTypes.Config `json:"container_config"`
+	DockerVersion   string                 `json:"docker_version"`
+	Author          string                 `json:"author"`
+	Config          *containerTypes.Config `json:"config"`
+	References      []string               `json:"references"`
+	LayerDigests    []string               `json:"layers_digests"`
+	// The following are top-level fields because nested JSON from a file
+	// won't unmarshal correctly.
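+	// For example (hypothetical values), a stored manifest file therefore keeps
+	// platform details at the top level rather than under a nested "platform" key:
+	//
+	//	{ "ref": "...", "os": "linux", "architecture": "amd64", "variant": "v7" }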
+	Architecture string   `json:"architecture"`
+	OS           string   `json:"os"`
+	OSVersion    string   `json:"os.version,omitempty"`
+	OSFeatures   []string `json:"os.features,omitempty"`
+	Variant      string   `json:"variant,omitempty"`
+	Features     []string `json:"features,omitempty"`
+	// This one's prettier at the end
+	CanonicalJSON []byte `json:"json"`
+}
diff --git a/cli/command/manifest/util.go b/cli/command/manifest/util.go
new file mode 100644
index 000000000000..8aa70b5a598e
--- /dev/null
+++ b/cli/command/manifest/util.go
@@ -0,0 +1,184 @@
+package manifest
+
+// List of valid os/arch values (see the "Optional Environment Variables" section
+// of https://golang.org/doc/install/source). linux/s390x is included as well,
+// since System z support already exists.
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/homedir"
+)
+
+type osArch struct {
+	os   string
+	arch string
+}
+
+// validOSArches is the set of supported os/arch combinations; anything not in
+// this map is rejected.
+var validOSArches = map[osArch]bool{
+	osArch{os: "darwin", arch: "386"}:      true,
+	osArch{os: "darwin", arch: "amd64"}:    true,
+	osArch{os: "darwin", arch: "arm"}:      true,
+	osArch{os: "darwin", arch: "arm64"}:    true,
+	osArch{os: "dragonfly", arch: "amd64"}: true,
+	osArch{os: "freebsd", arch: "386"}:     true,
+	osArch{os: "freebsd", arch: "amd64"}:   true,
+	osArch{os: "freebsd", arch: "arm"}:     true,
+	osArch{os: "linux", arch: "386"}:       true,
+	osArch{os: "linux", arch: "amd64"}:     true,
+	osArch{os: "linux", arch: "arm"}:       true,
+	osArch{os: "linux", arch: "arm64"}:     true,
+	osArch{os: "linux", arch: "ppc64"}:     true,
+	osArch{os: "linux", arch: "ppc64le"}:   true,
+	osArch{os: "linux", arch: "mips64"}:    true,
+	osArch{os: "linux", arch: "mips64le"}:  true,
+	osArch{os: "linux", arch: "s390x"}:     true,
+	osArch{os: "netbsd", arch: "386"}:      true,
+	osArch{os: "netbsd", arch: "amd64"}:    true,
+	osArch{os: "netbsd", arch: "arm"}:      true,
+	osArch{os: "openbsd", arch: "386"}:     true,
+	osArch{os: "openbsd", arch: "amd64"}:   true,
+	osArch{os: "openbsd", arch: "arm"}:     true,
+	osArch{os: "plan9", arch: "386"}:       true,
+	osArch{os: "plan9", arch: "amd64"}:     true,
+	osArch{os: "solaris", arch: "amd64"}:   true,
+	osArch{os: "windows", arch: "386"}:     true,
+	osArch{os: "windows", arch: "amd64"}:   true,
+}
+
+func isValidOSArch(os string, arch string) bool {
+	// check for existence of this combo
+	_, ok := validOSArches[osArch{os, arch}]
+	return ok
+}
+
+// makeFilesafeName converts a reference into a string safe for use as a file
+// name. Make sure the ref is a normalized name before calling this func.
+func makeFilesafeName(ref string) string {
+	fileName := strings.Replace(ref, ":", "-", -1)
+	return strings.Replace(fileName, "/", "_", -1)
+}
+
+func getListFilenames(transaction string) ([]string, error) {
+	baseDir, err := buildBaseFilename()
+	if err != nil {
+		return nil, err
+	}
+	transactionDir := filepath.Join(baseDir, makeFilesafeName(transaction))
+	fd, err := os.Open(transactionDir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			logrus.Debug(err.Error())
+			return nil, nil
+		}
+		return nil, err
+	}
+	defer fd.Close()
+	fileNames, err := fd.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	for i, f := range fileNames {
+		fileNames[i] = filepath.Join(transactionDir, f)
+	}
+	return fileNames, nil
+}
+
+func getManifestFd(manifest, transaction string) (*os.File, error) {
+	fileName, err := mfToFilename(manifest, transaction)
+	if err != nil {
+		return nil, err
+	}
+
+	return getFdGeneric(fileName)
+}
+
+func getFdGeneric(file string) (*os.File, error) {
+	fileinfo, err := os.Stat(file)
+	if err != nil && os.IsNotExist(err) {
+		logrus.Debugf("Manifest file %s not found.", file)
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	if fileinfo.IsDir() {
+		return nil, fmt.Errorf("Cannot open directory")
+	}
+	fd, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		return nil, err
+	}
+	return fd, nil
+}
+
+func buildBaseFilename() (string, error) {
+	// Store the manifests in the user's home directory to prevent conflicts. The HOME dir needs
+	// to be set, but can only be forcibly set on Linux at this time.
+	// See https://github.com/docker/docker/pull/29478 for more background on why this approach
+	// is being used.
+	if err := ensureHomeIfIAmStatic(); err != nil {
+		return "", err
+	}
+	userHome, err := homedir.GetStatic()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(userHome, ".docker", "manifests"), nil
+}
+
+func mfToFilename(manifest, transaction string) (string, error) {
+	baseDir, err := buildBaseFilename()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(baseDir, makeFilesafeName(transaction), makeFilesafeName(manifest)), nil
+}
+
+func unmarshalIntoManifestInspect(manifest, transaction string) (ImgManifestInspect, error) {
+	var newMf ImgManifestInspect
+	filename, err := mfToFilename(manifest, transaction)
+	if err != nil {
+		return ImgManifestInspect{}, err
+	}
+	buf, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return ImgManifestInspect{}, err
+	}
+	if err := json.Unmarshal(buf, &newMf); err != nil {
+		return ImgManifestInspect{}, err
+	}
+	return newMf, nil
+}
+
+func updateMfFile(newMf ImgManifestInspect, mfName, transaction string) error {
+	fileName, err := mfToFilename(mfName, transaction)
+	if err != nil {
+		return err
+	}
+	if err := os.Remove(fileName); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	fd, err := os.Create(fileName)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+	theBytes, err := json.Marshal(newMf)
+	if err != nil {
+		return err
+	}
+
+	if _, err := fd.Write(theBytes); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/cli/config/configfile/file.go b/cli/config/configfile/file.go
index 7214325d87fd..dc9f0769973a 100644
--- a/cli/config/configfile/file.go
+++ b/cli/config/configfile/file.go
@@ -41,6 +41,7 @@ type ConfigFile struct {
 	ConfigFormat       string            `json:"configFormat,omitempty"`
 	NodesFormat        string            `json:"nodesFormat,omitempty"`
 	PruneFilters       []string          `json:"pruneFilters,omitempty"`
+	InsecureRegistries []string          `json:"insecure-registries,omitempty"`
 }

 // LegacyLoadFromReader reads the non-nested configuration data given and sets up the
diff --git a/vendor.conf b/vendor.conf
index 0ca92b32281d..abf2c5c394ca 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -15,7 +15,7 @@ github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
 github.com/docker/libnetwork b13e0604016a4944025aaff521d9c125850b0d04
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/docker/notary v0.4.2
-github.com/docker/swarmkit 1a3e510517be82d18ac04380b5f71eddf06c2fc0
+github.com/docker/swarmkit 1a3e510517be82d18ac04380b5f71eddf06c2fc0
 github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
 github.com/gogo/protobuf 7efa791bd276fd4db00867cbd982b552627c24cb
 github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
@@ -28,12 +28,14 @@ github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
 github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
 github.com/opencontainers/image-spec 
f03dbe35d449c54915d235f1a3cf8f585a24babe github.com/opencontainers/runc 9c2d8d184e5da67c95d601382adf14862e4f2228 https://github.com/docker/runc.git +github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/vbatts/tar-split v0.10.1 github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go new file mode 100644 index 000000000000..88367b0a05ff --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/doc.go @@ -0,0 +1 @@ +package manifest diff --git a/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go new file mode 100644 index 000000000000..7a8cabbdbaaa --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/manifestlist/manifestlist.go @@ -0,0 +1,155 @@ +package manifestlist + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" +) + +// MediaTypeManifestList specifies the mediaType for manifest lists. +const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + +// SchemaVersion provides a pre-initialized version structure for this +// packages version of the manifest. +var SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifestList, +} + +func init() { + manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifestList) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// PlatformSpec specifies a platform where a particular image manifest is +// applicable. +type PlatformSpec struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. 
+ Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} + +// A ManifestDescriptor references a platform-specific manifest. +type ManifestDescriptor struct { + distribution.Descriptor + + // Platform specifies which platform the manifest pointed to by the + // descriptor runs on. + Platform PlatformSpec `json:"platform"` +} + +// ManifestList references manifests for various platforms. +type ManifestList struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Manifests []ManifestDescriptor `json:"manifests"` +} + +// References returnes the distribution descriptors for the referenced image +// manifests. +func (m ManifestList) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(m.Manifests)) + for i := range m.Manifests { + dependencies[i] = m.Manifests[i].Descriptor + } + + return dependencies +} + +// DeserializedManifestList wraps ManifestList with a copy of the original +// JSON. +type DeserializedManifestList struct { + ManifestList + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromDescriptors takes a slice of descriptors, and returns a +// DeserializedManifestList which contains the resulting manifest list +// and its JSON representation. +func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { + m := ManifestList{ + Versioned: SchemaVersion, + } + + m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) + copy(m.Manifests, descriptors) + + deserialized := DeserializedManifestList{ + ManifestList: m, + } + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new ManifestList struct from JSON data. +func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest list in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into ManifestList object + var manifestList ManifestList + if err := json.Unmarshal(m.canonical, &manifestList); err != nil { + return err + } + + m.ManifestList = manifestList + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifestList") +} + +// Payload returns the raw content of the manifest list. The contents can be +// used to calculate the content identifier. 
+func (m DeserializedManifestList) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go new file mode 100644 index 000000000000..9d222566ca48 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/config_builder.go @@ -0,0 +1,287 @@ +package schema1 + +import ( + "crypto/sha512" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +type diffID digest.Digest + +// gzippedEmptyTar is a gzip-compressed version of an empty tar file +// (1024 NULL bytes) +var gzippedEmptyTar = []byte{ + 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, + 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, +} + +// digestSHA256GzippedEmptyTar is the canonical sha256 digest of +// gzippedEmptyTar +const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") + +// configManifestBuilder is a type for constructing manifests from an image +// configuration and generic descriptors. +type configManifestBuilder struct { + // bs is a BlobService used to create empty layer tars in the + // blob store if necessary. + bs distribution.BlobService + // pk is the libtrust private key used to sign the final manifest. + pk libtrust.PrivateKey + // configJSON is configuration supplied when the ManifestBuilder was + // created. + configJSON []byte + // ref contains the name and optional tag provided to NewConfigManifestBuilder. + ref reference.Named + // descriptors is the set of descriptors referencing the layers. + descriptors []distribution.Descriptor + // emptyTarDigest is set to a valid digest if an empty tar has been + // put in the blob store; otherwise it is empty. + emptyTarDigest digest.Digest +} + +// NewConfigManifestBuilder is used to build new manifests for the current +// schema version from an image configuration and a set of descriptors. +// It takes a BlobService so that it can add an empty tar to the blob store +// if the resulting manifest needs empty layers. 
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { + return &configManifestBuilder{ + bs: bs, + pk: pk, + configJSON: configJSON, + ref: ref, + } +} + +// Build produces a final manifest from the given references +func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { + type imageRootFS struct { + Type string `json:"type"` + DiffIDs []diffID `json:"diff_ids,omitempty"` + BaseLayer string `json:"base_layer,omitempty"` + } + + type imageHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } + + type imageConfig struct { + RootFS *imageRootFS `json:"rootfs,omitempty"` + History []imageHistory `json:"history,omitempty"` + Architecture string `json:"architecture,omitempty"` + } + + var img imageConfig + + if err := json.Unmarshal(mb.configJSON, &img); err != nil { + return nil, err + } + + if len(img.History) == 0 { + return nil, errors.New("empty history when trying to create schema1 manifest") + } + + if len(img.RootFS.DiffIDs) != len(mb.descriptors) { + return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors) + } + + // Generate IDs for each layer + // For non-top-level layers, create fake V1Compatibility strings that + // fit the format and don't collide with anything else, but don't + // result in runnable images on their own. + type v1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + } + + fsLayerList := make([]FSLayer, len(img.History)) + history := make([]History, len(img.History)) + + parent := "" + layerCounter := 0 + for i, h := range img.History[:len(img.History)-1] { + var blobsum digest.Digest + if h.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + layerCounter++ + } + + v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() + + if i == 0 && img.RootFS.BaseLayer != "" { + // windows-only baselayer setup + baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) + parent = fmt.Sprintf("%x", baseID[:32]) + } + + v1Compatibility := v1Compatibility{ + ID: v1ID, + Parent: parent, + Comment: h.Comment, + Created: h.Created, + Author: h.Author, + } + v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} + if h.EmptyLayer { + v1Compatibility.ThrowAway = true + } + jsonBytes, err := json.Marshal(&v1Compatibility) + if err != nil { + return nil, err + } + + reversedIndex := len(img.History) - i - 1 + history[reversedIndex].V1Compatibility = string(jsonBytes) + fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} + + parent = v1ID + } + + latestHistory := img.History[len(img.History)-1] + + var blobsum digest.Digest + if latestHistory.EmptyLayer { + if blobsum, err = mb.emptyTar(ctx); err != nil { + return nil, err + } + } else { + if 
len(img.RootFS.DiffIDs) <= layerCounter { + return nil, errors.New("too many non-empty layers in History section") + } + blobsum = mb.descriptors[layerCounter].Digest + } + + fsLayerList[0] = FSLayer{BlobSum: blobsum} + dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) + + // Top-level v1compatibility string should be a modified version of the + // image config. + transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) + if err != nil { + return nil, err + } + + history[0].V1Compatibility = string(transformedConfig) + + tag := "" + if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + mfst := Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: mb.ref.Name(), + Tag: tag, + Architecture: img.Architecture, + FSLayers: fsLayerList, + History: history, + } + + return Sign(&mfst, mb.pk) +} + +// emptyTar pushes a compressed empty tar to the blob store if one doesn't +// already exist, and returns its blobsum. +func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { + if mb.emptyTarDigest != "" { + // Already put an empty tar + return mb.emptyTarDigest, nil + } + + descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) + switch err { + case nil: + mb.emptyTarDigest = descriptor.Digest + return descriptor.Digest, nil + case distribution.ErrBlobUnknown: + // nop + default: + return "", err + } + + // Add gzipped empty tar to the blob store + descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) + if err != nil { + return "", err + } + + mb.emptyTarDigest = descriptor.Digest + + return descriptor.Digest, nil +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { + descriptor := d.Descriptor() + + if err := descriptor.Digest.Validate(); err != nil { + return err + } + + mb.descriptors = append(mb.descriptors, descriptor) + return nil +} + +// References returns the current references added to this builder +func (mb *configManifestBuilder) References() []distribution.Descriptor { + return mb.descriptors +} + +// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON +func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. 
+ var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(configJSON, &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + delete(configAsMap, "rootfs") + delete(configAsMap, "history") + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go new file mode 100644 index 000000000000..65042a75fc96 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go @@ -0,0 +1,184 @@ +package schema1 + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. Note + // that for schema version 1, the the media is optionally "application/json". + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" + // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version + MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // MediaTypeManifestLayer specifies the media type for manifest layers + MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 1, + } +) + +func init() { + schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + sm := new(SignedManifest) + err := sm.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Digest: digest.FromBytes(sm.Canonical), + Size: int64(len(sm.Canonical)), + MediaType: MediaTypeSignedManifest, + } + return sm, desc, err + } + err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } + err = distribution.RegisterManifestSchema("application/json", schema1Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// FSLayer is a container struct for BlobSums defined in an image manifest +type FSLayer struct { + // BlobSum is the tarsum of the referenced filesystem image layer + BlobSum digest.Digest `json:"blobSum"` +} + +// History stores unstructured v1 compatibility information +type History struct { + // V1Compatibility is the raw v1 compatibility information + V1Compatibility string `json:"v1Compatibility"` +} + +// Manifest provides the base accessible fields for working with V2 image +// format in the registry. 
+type Manifest struct { + manifest.Versioned + + // Name is the name of the image's repository + Name string `json:"name"` + + // Tag is the tag of the image specified by this manifest + Tag string `json:"tag"` + + // Architecture is the host architecture on which this image is intended to + // run + Architecture string `json:"architecture"` + + // FSLayers is a list of filesystem layer blobSums contained in this image + FSLayers []FSLayer `json:"fsLayers"` + + // History is a list of unstructured historical data for v1 compatibility + History []History `json:"history"` +} + +// SignedManifest provides an envelope for a signed image manifest, including +// the format sensitive raw bytes. +type SignedManifest struct { + Manifest + + // Canonical is the canonical byte representation of the ImageManifest, + // without any attached signatures. The manifest byte + // representation cannot change or it will have to be re-signed. + Canonical []byte `json:"-"` + + // all contains the byte representation of the Manifest including signatures + // and is returned by Payload() + all []byte +} + +// UnmarshalJSON populates a new SignedManifest struct from JSON data. +func (sm *SignedManifest) UnmarshalJSON(b []byte) error { + sm.all = make([]byte, len(b), len(b)) + // store manifest and signatures in all + copy(sm.all, b) + + jsig, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return err + } + + // Resolve the payload in the manifest. + bytes, err := jsig.Payload() + if err != nil { + return err + } + + // sm.Canonical stores the canonical manifest JSON + sm.Canonical = make([]byte, len(bytes), len(bytes)) + copy(sm.Canonical, bytes) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { + return err + } + + sm.Manifest = manifest + + return nil +} + +// References returnes the descriptors of this manifests references +func (sm SignedManifest) References() []distribution.Descriptor { + dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) + for i, fsLayer := range sm.FSLayers { + dependencies[i] = distribution.Descriptor{ + MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", + Digest: fsLayer.BlobSum, + } + } + + return dependencies + +} + +// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner +// contents. Applications requiring a marshaled signed manifest should simply +// use Raw directly, since the the content produced by json.Marshal will be +// compacted and will fail signature checks. +func (sm *SignedManifest) MarshalJSON() ([]byte, error) { + if len(sm.all) > 0 { + return sm.all, nil + } + + // If the raw data is not available, just dump the inner content. + return json.Marshal(&sm.Manifest) +} + +// Payload returns the signed content of the signed manifest. +func (sm SignedManifest) Payload() (string, []byte, error) { + return MediaTypeSignedManifest, sm.all, nil +} + +// Signatures returns the signatures as provided by +// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws +// signatures. +func (sm *SignedManifest) Signatures() ([][]byte, error) { + jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + // Resolve the payload in the manifest. 
+ return jsig.Signatures() +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go new file mode 100644 index 000000000000..ae401478192d --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/reference_builder.go @@ -0,0 +1,98 @@ +package schema1 + +import ( + "fmt" + + "errors" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" +) + +// referenceManifestBuilder is a type for constructing manifests from schema1 +// dependencies. +type referenceManifestBuilder struct { + Manifest + pk libtrust.PrivateKey +} + +// NewReferenceManifestBuilder is used to build new manifests for the current +// schema version using schema1 dependencies. +func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { + tag := "" + if tagged, isTagged := ref.(reference.Tagged); isTagged { + tag = tagged.Tag() + } + + return &referenceManifestBuilder{ + Manifest: Manifest{ + Versioned: manifest.Versioned{ + SchemaVersion: 1, + }, + Name: ref.Name(), + Tag: tag, + Architecture: architecture, + }, + pk: pk, + } +} + +func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { + m := mb.Manifest + if len(m.FSLayers) == 0 { + return nil, errors.New("cannot build manifest with zero layers or history") + } + + m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) + m.History = make([]History, len(mb.Manifest.History)) + copy(m.FSLayers, mb.Manifest.FSLayers) + copy(m.History, mb.Manifest.History) + + return Sign(&m, mb.pk) +} + +// AppendReference adds a reference to the current ManifestBuilder +func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { + r, ok := d.(Reference) + if !ok { + return fmt.Errorf("Unable to add non-reference type to v1 builder") + } + + // Entries need to be prepended + mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) + mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) + return nil + +} + +// References returns the current references added to this builder +func (mb *referenceManifestBuilder) References() []distribution.Descriptor { + refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) + for i := range mb.Manifest.FSLayers { + layerDigest := mb.Manifest.FSLayers[i].BlobSum + history := mb.Manifest.History[i] + ref := Reference{layerDigest, 0, history} + refs[i] = ref.Descriptor() + } + return refs +} + +// Reference describes a manifest v2, schema version 1 dependency. +// An FSLayer associated with a history entry. +type Reference struct { + Digest digest.Digest + Size int64 // if we know it, set it for the descriptor. 
+ History History +} + +// Descriptor describes a reference +func (r Reference) Descriptor() distribution.Descriptor { + return distribution.Descriptor{ + MediaType: MediaTypeManifestLayer, + Digest: r.Digest, + Size: r.Size, + } +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go new file mode 100644 index 000000000000..c862dd8123b4 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/sign.go @@ -0,0 +1,68 @@ +package schema1 + +import ( + "crypto/x509" + "encoding/json" + + "github.com/docker/libtrust" +) + +// Sign signs the manifest with the provided private key, returning a +// SignedManifest. This typically won't be used within the registry, except +// for testing. +func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.Sign(pk); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} + +// SignWithChain signs the manifest with the given private key and x509 chain. +// The public key of the first element in the chain must be the public key +// corresponding with the sign key. +func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return nil, err + } + + js, err := libtrust.NewJSONSignature(p) + if err != nil { + return nil, err + } + + if err := js.SignWithChain(key, chain); err != nil { + return nil, err + } + + pretty, err := js.PrettySignature("signatures") + if err != nil { + return nil, err + } + + return &SignedManifest{ + Manifest: *m, + all: pretty, + Canonical: p, + }, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go new file mode 100644 index 000000000000..fa8daa56f55b --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema1/verify.go @@ -0,0 +1,32 @@ +package schema1 + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" +) + +// Verify verifies the signature of the signed manifest returning the public +// keys used during signing. +func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") + return nil, err + } + + return js.Verify() +} + +// VerifyChains verifies the signature of the signed manifest against the +// certificate pool returning the list of verified chains. Signatures without +// an x509 chain are not checked. 
+func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { + js, err := libtrust.ParsePrettySignature(sm.all, "signatures") + if err != nil { + return nil, err + } + + return js.VerifyChains(ca) +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/github.com/docker/distribution/manifest/schema2/builder.go new file mode 100644 index 000000000000..4b6ba5628ae9 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/builder.go @@ -0,0 +1,84 @@ +package schema2 + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/opencontainers/go-digest" +) + +// builder is a type for constructing manifests. +type builder struct { + // bs is a BlobService used to publish the configuration blob. + bs distribution.BlobService + + // configMediaType is media type used to describe configuration + configMediaType string + + // configJSON references + configJSON []byte + + // dependencies is a list of descriptors that gets built by successive + // calls to AppendReference. In case of image configuration these are layers. + dependencies []distribution.Descriptor +} + +// NewManifestBuilder is used to build new manifests for the current schema +// version. It takes a BlobService so it can publish the configuration blob +// as part of the Build process. +func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder { + mb := &builder{ + bs: bs, + configMediaType: configMediaType, + configJSON: make([]byte, len(configJSON)), + } + copy(mb.configJSON, configJSON) + + return mb +} + +// Build produces a final manifest from the given references. +func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { + m := Manifest{ + Versioned: SchemaVersion, + Layers: make([]distribution.Descriptor, len(mb.dependencies)), + } + copy(m.Layers, mb.dependencies) + + configDigest := digest.FromBytes(mb.configJSON) + + var err error + m.Config, err = mb.bs.Stat(ctx, configDigest) + switch err { + case nil: + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + return FromStruct(m) + case distribution.ErrBlobUnknown: + // nop + default: + return nil, err + } + + // Add config to the blob store + m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON) + // Override MediaType, since Put always replaces the specified media + // type with application/octet-stream in the descriptor it returns. + m.Config.MediaType = mb.configMediaType + if err != nil { + return nil, err + } + + return FromStruct(m) +} + +// AppendReference adds a reference to the current ManifestBuilder. +func (mb *builder) AppendReference(d distribution.Describable) error { + mb.dependencies = append(mb.dependencies, d.Descriptor()) + return nil +} + +// References returns the current references added to this builder. 
+func (mb *builder) References() []distribution.Descriptor { + return mb.dependencies +} diff --git a/vendor/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go new file mode 100644 index 000000000000..a2708c75098e --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/schema2/manifest.go @@ -0,0 +1,138 @@ +package schema2 + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest" + "github.com/opencontainers/go-digest" +) + +const ( + // MediaTypeManifest specifies the mediaType for the current version. + MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + + // MediaTypeImageConfig specifies the mediaType for the image configuration. + MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json" + + // MediaTypePluginConfig specifies the mediaType for plugin configuration. + MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json" + + // MediaTypeLayer is the mediaType used for layers referenced by the + // manifest. + MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + + // MediaTypeForeignLayer is the mediaType used for layers that must be + // downloaded from foreign URLs. + MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + + // MediaTypeUncompressedLayer is the mediaType used for layers which + // are not compressed. + MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar" +) + +var ( + // SchemaVersion provides a pre-initialized version structure for this + // packages version of the manifest. + SchemaVersion = manifest.Versioned{ + SchemaVersion: 2, + MediaType: MediaTypeManifest, + } +) + +func init() { + schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { + m := new(DeserializedManifest) + err := m.UnmarshalJSON(b) + if err != nil { + return nil, distribution.Descriptor{}, err + } + + dgst := digest.FromBytes(b) + return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err + } + err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) + if err != nil { + panic(fmt.Sprintf("Unable to register manifest: %s", err)) + } +} + +// Manifest defines a schema2 manifest. +type Manifest struct { + manifest.Versioned + + // Config references the image configuration as a blob. + Config distribution.Descriptor `json:"config"` + + // Layers lists descriptors for the layers referenced by the + // configuration. + Layers []distribution.Descriptor `json:"layers"` +} + +// References returnes the descriptors of this manifests references. +func (m Manifest) References() []distribution.Descriptor { + references := make([]distribution.Descriptor, 0, 1+len(m.Layers)) + references = append(references, m.Config) + references = append(references, m.Layers...) + return references +} + +// Target returns the target of this signed manifest. +func (m Manifest) Target() distribution.Descriptor { + return m.Config +} + +// DeserializedManifest wraps Manifest with a copy of the original JSON. +// It satisfies the distribution.Manifest interface. +type DeserializedManifest struct { + Manifest + + // canonical is the canonical byte representation of the Manifest. + canonical []byte +} + +// FromStruct takes a Manifest structure, marshals it to JSON, and returns a +// DeserializedManifest which contains the manifest and its JSON representation. 
+func FromStruct(m Manifest) (*DeserializedManifest, error) { + var deserialized DeserializedManifest + deserialized.Manifest = m + + var err error + deserialized.canonical, err = json.MarshalIndent(&m, "", " ") + return &deserialized, err +} + +// UnmarshalJSON populates a new Manifest struct from JSON data. +func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { + m.canonical = make([]byte, len(b), len(b)) + // store manifest in canonical + copy(m.canonical, b) + + // Unmarshal canonical JSON into Manifest object + var manifest Manifest + if err := json.Unmarshal(m.canonical, &manifest); err != nil { + return err + } + + m.Manifest = manifest + + return nil +} + +// MarshalJSON returns the contents of canonical. If canonical is empty, +// marshals the inner contents. +func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { + if len(m.canonical) > 0 { + return m.canonical, nil + } + + return nil, errors.New("JSON representation not initialized in DeserializedManifest") +} + +// Payload returns the raw content of the manifest. The contents can be used to +// calculate the content identifier. +func (m DeserializedManifest) Payload() (string, []byte, error) { + return m.MediaType, m.canonical, nil +} diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go new file mode 100644 index 000000000000..caa6b14e8850 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifest/versioned.go @@ -0,0 +1,12 @@ +package manifest + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` + + // MediaType is the media type of this schema. + MediaType string `json:"mediaType,omitempty"` +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go new file mode 100644 index 000000000000..72551a38d469 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -0,0 +1,59 @@ +package graphdriver + +import "sync" + +type minfo struct { + check bool + count int +} + +// RefCounter is a generic counter for use by graphdriver Get/Put calls +type RefCounter struct { + counts map[string]*minfo + mu sync.Mutex + checker Checker +} + +// NewRefCounter returns a new RefCounter +func NewRefCounter(c Checker) *RefCounter { + return &RefCounter{ + checker: c, + counts: make(map[string]*minfo), + } +} + +// Increment increases the ref count for the given id and returns the current count +func (c *RefCounter) Increment(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count++ + }) +} + +// Decrement decreases the ref count for the given id and returns the current count +func (c *RefCounter) Decrement(path string) int { + return c.incdec(path, func(minfo *minfo) { + minfo.count-- + }) +} + +func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { + c.mu.Lock() + m := c.counts[path] + if m == nil { + m = &minfo{} + c.counts[path] = m + } + // if we are checking this path for the first time check to make sure + // if it was already mounted on the system and make sure we have a correct ref + // count if it is mounted as it is in use. 
+ if !m.check { + m.check = true + if c.checker.IsMounted(path) { + m.count++ + } + } + infoOp(m) + count := m.count + c.mu.Unlock() + return count +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 000000000000..88f190d9e18b --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,287 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc + + // ErrNotSupported returned when driver is not supported. + ErrNotSupported = errors.New("driver not supported") + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") +) + +//CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which choose not to implement the entire Driver +// interface and use the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem. + Get(id, mountLabel string) (dir string, err error) + // Put releases the system resources for the specified id, + // e.g, unmounting layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low + // level diagnostic status about this driver. 
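As a usage sketch of the counter just defined: any type satisfying the Checker interface (declared a little further down in driver.go) can seed the initial count. The no-op checker here is an assumption, so the counter behaves as a plain reference count.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

// nopChecker is a stand-in Checker that reports nothing as mounted,
// so RefCounter never seeds a count from the mount table.
type nopChecker struct{}

func (nopChecker) IsMounted(path string) bool { return false }

func main() {
	c := graphdriver.NewRefCounter(nopChecker{})
	fmt.Println(c.Increment("/var/lib/docker/overlay2/abc")) // 1
	fmt.Println(c.Increment("/var/lib/docker/overlay2/abc")) // 2
	fmt.Println(c.Decrement("/var/lib/docker/overlay2/abc")) // 1
}
```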
+ Status() [][2]string + // Returns a set of key-value pairs which give low level information + // about the image/container driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // Flags that this driver is capable of reproducing exactly equivalent + // diffs for read-only layers. If set, clients can rely on the driver + // for consistent tar streams, and avoid extra processing to account + // for potential differences (eg: the layer store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver. 
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
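For context, this is roughly how the built-in drivers hook into the registry above from their package init(); the package and driver name here are hypothetical and the body is deliberately stubbed.

```go
package mydriver

import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/idtools"
)

func init() {
	if err := graphdriver.Register("mydriver", Init); err != nil {
		panic(err)
	}
}

// Init matches graphdriver.InitFunc; root is <docker-root>/mydriver.
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	// Set up state under root, then return a type implementing
	// graphdriver.Driver (often a ProtoDriver wrapped with
	// graphdriver.NewNaiveDiffDriver to get the diff methods for free).
	return nil, graphdriver.ErrNotSupported // placeholder only
}
```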
+ logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range priority { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if isDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// isDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func isDriverNotSupported(err error) bool { + return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + driversMap[driver] = true + } + } + return driversMap +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go new file mode 100644 index 000000000000..2891a84f3aca --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go @@ -0,0 +1,19 @@ +package graphdriver + +import "syscall" + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } +) + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go new file mode 100644 index 000000000000..5c8d0e230141 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go @@ -0,0 +1,135 @@ +// +build linux + +package graphdriver + +import ( + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicAufs filesystem id for Aufs + FsMagicAufs = FsMagic(0x61756673) + // FsMagicBtrfs filesystem id for Btrfs + FsMagicBtrfs = FsMagic(0x9123683E) + // FsMagicCramfs filesystem id for Cramfs + FsMagicCramfs = FsMagic(0x28cd3d45) + // FsMagicEcryptfs filesystem id for eCryptfs + FsMagicEcryptfs = FsMagic(0xf15f) + // FsMagicExtfs filesystem id for Extfs + FsMagicExtfs = FsMagic(0x0000EF53) + // 
FsMagicF2fs filesystem id for F2fs + FsMagicF2fs = FsMagic(0xF2F52010) + // FsMagicGPFS filesystem id for GPFS + FsMagicGPFS = FsMagic(0x47504653) + // FsMagicJffs2Fs filesystem if for Jffs2Fs + FsMagicJffs2Fs = FsMagic(0x000072b6) + // FsMagicJfs filesystem id for Jfs + FsMagicJfs = FsMagic(0x3153464a) + // FsMagicNfsFs filesystem id for NfsFs + FsMagicNfsFs = FsMagic(0x00006969) + // FsMagicRAMFs filesystem id for RamFs + FsMagicRAMFs = FsMagic(0x858458f6) + // FsMagicReiserFs filesystem id for ReiserFs + FsMagicReiserFs = FsMagic(0x52654973) + // FsMagicSmbFs filesystem id for SmbFs + FsMagicSmbFs = FsMagic(0x0000517B) + // FsMagicSquashFs filesystem id for SquashFs + FsMagicSquashFs = FsMagic(0x73717368) + // FsMagicTmpFs filesystem id for TmpFs + FsMagicTmpFs = FsMagic(0x01021994) + // FsMagicVxFS filesystem id for VxFs + FsMagicVxFS = FsMagic(0xa501fcf5) + // FsMagicXfs filesystem id for Xfs + FsMagicXfs = FsMagic(0x58465342) + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) + // FsMagicOverlay filesystem id for overlay + FsMagicOverlay = FsMagic(0x794C7630) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "btrfs", + "zfs", + "overlay2", + "overlay", + "devicemapper", + "vfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicAufs: "aufs", + FsMagicBtrfs: "btrfs", + FsMagicCramfs: "cramfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicGPFS: "gpfs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", + FsMagicNfsFs: "nfs", + FsMagicOverlay: "overlayfs", + FsMagicRAMFs: "ramfs", + FsMagicReiserFs: "reiserfs", + FsMagicSmbFs: "smb", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", + FsMagicUnsupported: "unsupported", + FsMagicVxFS: "vxfs", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { + return 0, err + } + return FsMagic(buf.Type), nil +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. 
+func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + var buf syscall.Statfs_t + if err := syscall.Statfs(mountPath, &buf); err != nil { + return false, err + } + return FsMagic(buf.Type) == fsType, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go new file mode 100644 index 000000000000..7daf01c32d7d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go @@ -0,0 +1,97 @@ +// +build solaris,cgo + +package graphdriver + +/* +#include +#include + +static inline struct statvfs *getstatfs(char *s) { + struct statvfs *buf; + int err; + buf = (struct statvfs *)malloc(sizeof(struct statvfs)); + err = statvfs(s, buf); + return buf; +} +*/ +import "C" +import ( + "path/filepath" + "unsafe" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/mount" +) + +const ( + // FsMagicZfs filesystem id for Zfs + FsMagicZfs = FsMagic(0x2fc12fc1) +) + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "zfs", + } + + // FsNames maps filesystem id to name of the filesystem. + FsNames = map[FsMagic]string{ + FsMagicZfs: "zfs", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + return 0, nil +} + +type fsChecker struct { + t FsMagic +} + +func (c *fsChecker) IsMounted(path string) bool { + m, _ := Mounted(c.t, path) + return m +} + +// NewFsChecker returns a checker configured for the provied FsMagic +func NewFsChecker(t FsMagic) Checker { + return &fsChecker{ + t: t, + } +} + +// NewDefaultChecker returns a check that parses /proc/mountinfo to check +// if the specified path is mounted. +// No-op on Solaris. +func NewDefaultChecker() Checker { + return &defaultChecker{} +} + +type defaultChecker struct { +} + +func (c *defaultChecker) IsMounted(path string) bool { + m, _ := mount.Mounted(path) + return m +} + +// Mounted checks if the given path is mounted as the fs type +//Solaris supports only ZFS for now +func Mounted(fsType FsMagic, mountPath string) (bool, error) { + + cs := C.CString(filepath.Dir(mountPath)) + buf := C.getstatfs(cs) + + // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ] + if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || + (buf.f_basetype[3] != 0) { + logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) + C.free(unsafe.Pointer(buf)) + return false, ErrPrerequisites + } + + C.free(unsafe.Pointer(buf)) + C.free(unsafe.Pointer(cs)) + return true, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go new file mode 100644 index 000000000000..4a875608b0de --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go @@ -0,0 +1,15 @@ +// +build !linux,!windows,!freebsd,!solaris + +package graphdriver + +var ( + // Slice of drivers that should be used in an order + priority = []string{ + "unsupported", + } +) + +// GetFSMagic returns the filesystem id given the path. 
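Putting the Linux helpers together, a hedged sketch that inspects the filesystem backing a path and then uses a magic-bound checker; the paths are assumptions, and FsNames covers only the filesystems listed above.

```go
// +build linux

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

func main() {
	magic, err := graphdriver.GetFSMagic("/var/lib/docker")
	if err != nil {
		panic(err)
	}
	name, ok := graphdriver.FsNames[magic]
	if !ok {
		name = "unknown"
	}
	fmt.Printf("0x%x => %s\n", magic, name)

	// A checker bound to one filesystem type answers only for that type.
	checker := graphdriver.NewFsChecker(graphdriver.FsMagicTmpFs)
	fmt.Println("tmpfs mounted at /dev/shm:", checker.IsMounted("/dev/shm"))
}
```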
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 000000000000..ffd30c2950c7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,14 @@ +package graphdriver + +var ( + // Slice of drivers that should be used in order + priority = []string{ + "windowsfilter", + } +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 000000000000..20826cd7d22d --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,169 @@ +package graphdriver + +import ( + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods which it may or may not +// support on its own. See the comment on the exported +// NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. +type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". 
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + parentFs := "" + + if parent != "" { + parentFs, err = driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. 
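The Diff/ApplyDiff pair above composes naturally: Diff emits an uncompressed tar stream, which is exactly what ApplyDiff consumes. A sketch, assuming drv wraps some ProtoDriver via NewNaiveDiffDriver and all four layer IDs exist:

```go
package layerutil

import (
	"github.com/docker/docker/daemon/graphdriver"
)

// copyLayerDiff streams the changes of one layer into another and returns
// the unpacked size in bytes, as reported by ApplyDiff.
func copyLayerDiff(drv graphdriver.Driver, srcID, srcParent, dstID, dstParent string) (int64, error) {
	rc, err := drv.Diff(srcID, srcParent) // uncompressed tar stream
	if err != nil {
		return 0, err
	}
	defer rc.Close()
	return drv.ApplyDiff(dstID, dstParent, rc)
}
```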
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs, changes), nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 000000000000..f6852f075213 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,43 @@ +package graphdriver + +import ( + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +type pluginClient interface { + // Call calls the specified method with the specified arguments for the plugin. + Call(string, interface{}, interface{}) error + // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream + Stream(string, interface{}) (io.ReadCloser, error) + // SendFile calls the specified method, and passes through the IO stream + SendFile(string, io.Reader, interface{}) error +} + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl, Capabilities{}} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 000000000000..120afad45928 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,263 @@ +package graphdriver + +import ( + "errors" + "fmt" + "io" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string `json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, 
gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return "", err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return filepath.Join(d.p.BasePath(), ret.Dir), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return 
ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go new file mode 100644 index 000000000000..f24678d0e27c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/config.go @@ -0,0 +1,244 @@ +package distribution + +import ( + "encoding/json" + "fmt" + "io" + "runtime" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/docker/libtrust" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// Config stores configuration for communicating +// with a registry. +type Config struct { + // MetaHeaders stores HTTP headers with metadata about the image + MetaHeaders map[string][]string + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *types.AuthConfig + // ProgressOutput is the interface for showing the status of the pull + // operation. + ProgressOutput progress.Output + // RegistryService is the registry service to use for TLS configuration + // and endpoint lookup. + RegistryService registry.Service + // ImageEventLogger notifies events for a given image + ImageEventLogger func(id, name, action string) + // MetadataStore is the storage backend for distribution-specific + // metadata. + MetadataStore metadata.Store + // ImageStore manages images. + ImageStore ImageConfigStore + // ReferenceStore manages tags. 
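The proxy methods above all follow the same wire pattern: marshal a request, call the named plugin method, and turn a non-empty Err field back into a Go error. The struct below mirrors the unexported graphDriverRequest purely to show what a GraphDriver.Create call serializes to; the field values are invented.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// createRequest mirrors the unexported graphDriverRequest for illustration.
type createRequest struct {
	ID         string            `json:",omitempty"`
	Parent     string            `json:",omitempty"`
	MountLabel string            `json:",omitempty"`
	StorageOpt map[string]string `json:",omitempty"`
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	_ = enc.Encode(createRequest{ID: "5d2a", Parent: "9f11"})
	// -> {"ID":"5d2a","Parent":"9f11"}
	// The plugin answers with a graphDriverResponse; an empty Err means success.
	fmt.Println(`{"Err":""}`)
}
```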
This value is optional, when excluded + // content will not be tagged. + ReferenceStore refstore.Store + // RequireSchema2 ensures that only schema2 manifests are used. + RequireSchema2 bool +} + +// ImagePullConfig stores pull configuration. +type ImagePullConfig struct { + Config + + // DownloadManager manages concurrent pulls. + DownloadManager RootFSDownloadManager + // Schema2Types is the valid schema2 configuration types allowed + // by the pull operation. + Schema2Types []string +} + +// ImagePushConfig stores push configuration. +type ImagePushConfig struct { + Config + + // ConfigMediaType is the configuration media type for + // schema2 manifests. + ConfigMediaType string + // LayerStore manages layers. + LayerStore PushLayerProvider + // TrustKey is the private key for legacy signatures. This is typically + // an ephemeral key, since these signatures are no longer verified. + TrustKey libtrust.PrivateKey + // UploadManager dispatches uploads. + UploadManager *xfer.LayerUploadManager +} + +// ImageConfigStore handles storing and getting image configurations +// by digest. Allows getting an image configurations rootfs from the +// configuration. +type ImageConfigStore interface { + Put([]byte) (digest.Digest, error) + Get(digest.Digest) ([]byte, error) + RootFSFromConfig([]byte) (*image.RootFS, error) +} + +// PushLayerProvider provides layers to be pushed by ChainID. +type PushLayerProvider interface { + Get(layer.ChainID) (PushLayer, error) +} + +// PushLayer is a pushable layer with metadata about the layer +// and access to the content of the layer. +type PushLayer interface { + ChainID() layer.ChainID + DiffID() layer.DiffID + Parent() PushLayer + Open() (io.ReadCloser, error) + Size() (int64, error) + MediaType() string + Release() +} + +// RootFSDownloadManager handles downloading of the rootfs +type RootFSDownloadManager interface { + // Download downloads the layers into the given initial rootfs and + // returns the final rootfs. + // Given progress output to track download progress + // Returns function to release download resources + Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) +} + +type imageConfigStore struct { + image.Store +} + +// NewImageConfigStoreFromStore returns an ImageConfigStore backed +// by an image.Store for container images. 
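As a sketch of how the constructor that follows is used during a pull: wrap the daemon's image store, then resolve a fetched config blob into its layer chain. Here imgStore and rawConfig are assumed to come from the caller.

```go
package pullwiring

import (
	"github.com/docker/docker/distribution"
	"github.com/docker/docker/image"
)

// rootFSOf turns a raw image config blob into the RootFS the downloader
// needs, using the store adapter defined below.
func rootFSOf(imgStore image.Store, rawConfig []byte) (*image.RootFS, error) {
	cs := distribution.NewImageConfigStoreFromStore(imgStore)
	return cs.RootFSFromConfig(rawConfig)
}
```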
+func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { + return &imageConfigStore{ + Store: is, + } +} + +func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { + id, err := s.Store.Create(c) + return digest.Digest(id), err +} + +func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { + img, err := s.Store.Get(image.IDFromDigest(d)) + if err != nil { + return nil, err + } + return img.RawJSON(), nil +} + +func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { + var unmarshalledConfig image.Image + if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { + return nil, err + } + + // fail immediately on Windows when downloading a non-Windows image + // and vice versa + if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { + return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) + } + + return unmarshalledConfig.RootFS, nil +} + +type storeLayerProvider struct { + ls layer.Store +} + +// NewLayerProviderFromStore returns a layer provider backed by +// an instance of LayerStore. Only getting layers as gzipped +// tars is supported. +func NewLayerProviderFromStore(ls layer.Store) PushLayerProvider { + return &storeLayerProvider{ + ls: ls, + } +} + +func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { + if lid == "" { + return &storeLayer{ + Layer: layer.EmptyLayer, + }, nil + } + l, err := p.ls.Get(lid) + if err != nil { + return nil, err + } + + sl := storeLayer{ + Layer: l, + ls: p.ls, + } + if d, ok := l.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + }, nil + } + + return &sl, nil +} + +type storeLayer struct { + layer.Layer + ls layer.Store +} + +func (l *storeLayer) Parent() PushLayer { + p := l.Layer.Parent() + if p == nil { + return nil + } + sl := storeLayer{ + Layer: p, + ls: l.ls, + } + if d, ok := p.(distribution.Describable); ok { + return &describableStoreLayer{ + storeLayer: sl, + describable: d, + } + } + + return &sl +} + +func (l *storeLayer) Open() (io.ReadCloser, error) { + return l.Layer.TarStream() +} + +func (l *storeLayer) Size() (int64, error) { + return l.Layer.DiffSize() +} + +func (l *storeLayer) MediaType() string { + // layer store always returns uncompressed tars + return schema2.MediaTypeUncompressedLayer +} + +func (l *storeLayer) Release() { + if l.ls != nil { + layer.ReleaseAndLog(l.ls, l.Layer) + } +} + +type describableStoreLayer struct { + storeLayer + describable distribution.Describable +} + +func (l *describableStoreLayer) Descriptor() distribution.Descriptor { + return l.describable.Descriptor() +} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go new file mode 100644 index 000000000000..adf272a5cf42 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/errors.go @@ -0,0 +1,159 @@ +package distribution + +import ( + "net/url" + "strings" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + 
"github.com/docker/docker/distribution/xfer" + "github.com/pkg/errors" +) + +// ErrNoSupport is an error type used for errors indicating that an operation +// is not supported. It encapsulates a more specific error. +type ErrNoSupport struct{ Err error } + +func (e ErrNoSupport) Error() string { + if e.Err == nil { + return "not supported" + } + return e.Err.Error() +} + +// fallbackError wraps an error that can possibly allow fallback to a different +// endpoint. +type fallbackError struct { + // err is the error being wrapped. + err error + // confirmedV2 is set to true if it was confirmed that the registry + // supports the v2 protocol. This is used to limit fallbacks to the v1 + // protocol. + confirmedV2 bool + // transportOK is set to true if we managed to speak HTTP with the + // registry. This confirms that we're using appropriate TLS settings + // (or lack of TLS). + transportOK bool +} + +// Error renders the FallbackError as a string. +func (f fallbackError) Error() string { + return f.Cause().Error() +} + +func (f fallbackError) Cause() error { + return f.err +} + +// shouldV2Fallback returns true if this error is a reason to fall back to v1. +func shouldV2Fallback(err errcode.Error) bool { + switch err.Code { + case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: + return true + } + return false +} + +// TranslatePullError is used to convert an error from a registry pull +// operation to an error representing the entire pull operation. Any error +// information which is not used by the returned error gets output to +// log at info level. +func TranslatePullError(err error, ref reference.Named) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + for _, extra := range v[1:] { + logrus.Infof("Ignoring extra error returned from registry: %v", extra) + } + return TranslatePullError(v[0], ref) + } + case errcode.Error: + var newErr error + switch v.Code { + case errcode.ErrorCodeDenied: + // ErrorCodeDenied is used when access to the repository was denied + newErr = errors.Errorf("repository %s not found: does not exist or no pull access", reference.FamiliarName(ref)) + case v2.ErrorCodeManifestUnknown: + newErr = errors.Errorf("manifest for %s not found", reference.FamiliarString(ref)) + case v2.ErrorCodeNameUnknown: + newErr = errors.Errorf("repository %s not found", reference.FamiliarName(ref)) + } + if newErr != nil { + logrus.Infof("Translating %q to %q", err, newErr) + return newErr + } + case xfer.DoNotRetry: + return TranslatePullError(v.Err, ref) + } + + return err +} + +// continueOnError returns true if we should fallback to the next endpoint +// as a result of this error. +func continueOnError(err error) bool { + switch v := err.(type) { + case errcode.Errors: + if len(v) == 0 { + return true + } + return continueOnError(v[0]) + case ErrNoSupport: + return continueOnError(v.Err) + case errcode.Error: + return shouldV2Fallback(v) + case *client.UnexpectedHTTPResponseError: + return true + case ImageConfigPullError: + return false + case error: + return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return true +} + +// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the +// operation after this error. 
+func retryOnError(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + return retryOnError(v[0]) + } + case errcode.Error: + switch v.Code { + case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: + return xfer.DoNotRetry{Err: err} + } + case *url.Error: + switch v.Err { + case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: + return xfer.DoNotRetry{Err: v.Err} + } + return retryOnError(v.Err) + case *client.UnexpectedHTTPResponseError: + return xfer.DoNotRetry{Err: err} + case error: + if err == distribution.ErrBlobUnknown { + return xfer.DoNotRetry{Err: err} + } + if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { + return xfer.DoNotRetry{Err: err} + } + } + // let's be nice and fallback if the error is a completely + // unexpected one. + // If new errors have to be handled in some way, please + // add them to the switch above. + return err +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go new file mode 100644 index 000000000000..05ba4f817da1 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/metadata.go @@ -0,0 +1,75 @@ +package metadata + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +// Store implements a K/V store for mapping distribution-related IDs +// to on-disk layer IDs and image IDs. The namespace identifies the type of +// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. +type Store interface { + // Get retrieves data by namespace and key. + Get(namespace string, key string) ([]byte, error) + // Set writes data indexed by namespace and key. + Set(namespace, key string, value []byte) error + // Delete removes data indexed by namespace and key. + Delete(namespace, key string) error +} + +// FSMetadataStore uses the filesystem to associate metadata with layer and +// image IDs. +type FSMetadataStore struct { + sync.RWMutex + basePath string +} + +// NewFSMetadataStore creates a new filesystem-based metadata store. +func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { + if err := os.MkdirAll(basePath, 0700); err != nil { + return nil, err + } + return &FSMetadataStore{ + basePath: basePath, + }, nil +} + +func (store *FSMetadataStore) path(namespace, key string) string { + return filepath.Join(store.basePath, namespace, key) +} + +// Get retrieves data by namespace and key. The data is read from a file named +// after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { + store.RLock() + defer store.RUnlock() + + return ioutil.ReadFile(store.path(namespace, key)) +} + +// Set writes data indexed by namespace and key. The data is written to a file +// named after the key, stored in the namespace's directory. +func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + return ioutils.AtomicWriteFile(path, value, 0644) +} + +// Delete removes data indexed by namespace and key. The data file named after +// the key, stored in the namespace's directory is deleted. 
+func (store *FSMetadataStore) Delete(namespace, key string) error { + store.Lock() + defer store.Unlock() + + path := store.path(namespace, key) + return os.Remove(path) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go new file mode 100644 index 000000000000..f262d4dc34aa --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go @@ -0,0 +1,51 @@ +package metadata + +import ( + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/pkg/errors" +) + +// V1IDService maps v1 IDs to layers on disk. +type V1IDService struct { + store Store +} + +// NewV1IDService creates a new V1 ID mapping service. +func NewV1IDService(store Store) *V1IDService { + return &V1IDService{ + store: store, + } +} + +// namespace returns the namespace used by this service. +func (idserv *V1IDService) namespace() string { + return "v1id" +} + +// Get finds a layer by its V1 ID. +func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { + if idserv.store == nil { + return "", errors.New("no v1IDService storage") + } + if err := v1.ValidateID(v1ID); err != nil { + return layer.DiffID(""), err + } + + idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) + if err != nil { + return layer.DiffID(""), err + } + return layer.DiffID(idBytes), nil +} + +// Set associates an image with a V1 ID. +func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { + if idserv.store == nil { + return nil + } + if err := v1.ValidateID(v1ID); err != nil { + return err + } + return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) +} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go new file mode 100644 index 000000000000..c31c7214e7c0 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go @@ -0,0 +1,241 @@ +package metadata + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// V2MetadataService maps layer IDs to a set of known metadata for +// the layer. +type V2MetadataService interface { + GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) + GetDiffID(dgst digest.Digest) (layer.DiffID, error) + Add(diffID layer.DiffID, metadata V2Metadata) error + TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error + Remove(metadata V2Metadata) error +} + +// v2MetadataService implements V2MetadataService +type v2MetadataService struct { + store Store +} + +var _ V2MetadataService = &v2MetadataService{} + +// V2Metadata contains the digest and source repository information for a layer. +type V2Metadata struct { + Digest digest.Digest + SourceRepository string + // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching + // metadata entries accompanied by the same credentials without actually exposing them. + HMAC string +} + +// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". 
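A small usage sketch of the store above; the base path and value are invented, and the "registry,v1ID" key format matches what V1IDService writes.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/distribution/metadata"
)

func main() {
	// One directory per namespace, one file per key.
	store, err := metadata.NewFSMetadataStore("/tmp/dist-metadata")
	if err != nil {
		panic(err)
	}
	if err := store.Set("v1id", "registry.example.com,deadbeef", []byte("example-diff-id")); err != nil {
		panic(err)
	}
	v, _ := store.Get("v1id", "registry.example.com,deadbeef")
	fmt.Println(string(v))
	_ = store.Delete("v1id", "registry.example.com,deadbeef")
}
```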
+func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { + if len(meta.HMAC) == 0 || len(key) == 0 { + return len(meta.HMAC) == 0 && len(key) == 0 + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + expectedMac := mac.Sum(nil) + + storedMac, err := hex.DecodeString(meta.HMAC) + if err != nil { + return false + } + + return hmac.Equal(storedMac, expectedMac) +} + +// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. +func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { + if len(key) == 0 || meta == nil { + return "" + } + mac := hmac.New(sha256.New, key) + mac.Write([]byte(meta.Digest)) + mac.Write([]byte(meta.SourceRepository)) + return hex.EncodeToString(mac.Sum(nil)) +} + +// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata +// entries. +func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { + if authConfig == nil { + return nil, nil + } + key := authConfigKeyInput{ + Username: authConfig.Username, + Password: authConfig.Password, + Auth: authConfig.Auth, + IdentityToken: authConfig.IdentityToken, + RegistryToken: authConfig.RegistryToken, + } + buf, err := json.Marshal(&key) + if err != nil { + return nil, err + } + return []byte(digest.FromBytes([]byte(buf))), nil +} + +// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for +// hmac key creation. +type authConfigKeyInput struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// maxMetadata is the number of metadata entries to keep per layer DiffID. +const maxMetadata = 50 + +// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. +func NewV2MetadataService(store Store) V2MetadataService { + return &v2MetadataService{ + store: store, + } +} + +func (serv *v2MetadataService) diffIDNamespace() string { + return "v2metadata-by-diffid" +} + +func (serv *v2MetadataService) digestNamespace() string { + return "diffid-by-digest" +} + +func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { + return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() +} + +func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { + return string(dgst.Algorithm()) + "/" + dgst.Hex() +} + +// GetMetadata finds the metadata associated with a layer DiffID. +func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { + if serv.store == nil { + return nil, errors.New("no metadata storage") + } + jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + if err != nil { + return nil, err + } + + var metadata []V2Metadata + if err := json.Unmarshal(jsonBytes, &metadata); err != nil { + return nil, err + } + + return metadata, nil +} + +// GetDiffID finds a layer DiffID from a digest. +func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { + if serv.store == nil { + return layer.DiffID(""), errors.New("no metadata storage") + } + diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) + if err != nil { + return layer.DiffID(""), err + } + + return layer.DiffID(diffIDBytes), nil +} + +// Add associates metadata with a layer DiffID. 
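Tying the three HMAC helpers defined just below together, a sketch with made-up values: derive a key from credentials, stamp a metadata entry, and later verify it without the credentials ever being stored.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/distribution/metadata"
	"github.com/opencontainers/go-digest"
)

func main() {
	key, err := metadata.ComputeV2MetadataHMACKey(&types.AuthConfig{
		Username: "user", Password: "secret",
	})
	if err != nil {
		panic(err)
	}
	meta := metadata.V2Metadata{
		Digest:           digest.FromString("example layer"),
		SourceRepository: "docker.io/library/alpine",
	}
	meta.HMAC = metadata.ComputeV2MetadataHMAC(key, &meta)
	fmt.Println(metadata.CheckV2MetadataHMAC(&meta, key))         // true
	fmt.Println(metadata.CheckV2MetadataHMAC(&meta, []byte("x"))) // false
}
```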
If too many metadata entries are +// present, the oldest one is dropped. +func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an add becomes a no-op. + // TODO: implement in memory storage + return nil + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + newMetadata = append(newMetadata, metadata) + + if len(newMetadata) > maxMetadata { + newMetadata = newMetadata[len(newMetadata)-maxMetadata:] + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) + if err != nil { + return err + } + + return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) +} + +// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer +// DiffID. If too many metadata entries are present, the oldest one is dropped. +func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { + meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) + return serv.Add(diffID, meta) +} + +// Remove unassociates a metadata entry from a layer DiffID. +func (serv *v2MetadataService) Remove(metadata V2Metadata) error { + if serv.store == nil { + // Support a service which has no backend storage, in this case + // an remove becomes a no-op. + // TODO: implement in memory storage + return nil + } + diffID, err := serv.GetDiffID(metadata.Digest) + if err != nil { + return err + } + oldMetadata, err := serv.GetMetadata(diffID) + if err != nil { + oldMetadata = nil + } + newMetadata := make([]V2Metadata, 0, len(oldMetadata)) + + // Copy all other metadata to new slice + for _, oldMeta := range oldMetadata { + if oldMeta != metadata { + newMetadata = append(newMetadata, oldMeta) + } + } + + if len(newMetadata) == 0 { + return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) + } + + jsonBytes, err := json.Marshal(newMetadata) + if err != nil { + return err + } + + return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) +} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go new file mode 100644 index 000000000000..c5bdbd6d3bfd --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull.go @@ -0,0 +1,198 @@ +package distribution + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/api" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +// Puller is an interface that abstracts pulling for different API versions. +type Puller interface { + // Pull tries to pull the image referenced by `tag` + // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. 
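A sketch of how a pusher or puller might record a layer's provenance with the service above; the repository name is an assumption, and Add's eviction keeps at most maxMetadata (50) entries per DiffID.

```go
package pushrecord

import (
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
	"github.com/opencontainers/go-digest"
)

// recordLayer stamps the entry with the credentials-derived hmacKey and
// stores it under the layer's DiffID.
func recordLayer(store metadata.Store, diffID layer.DiffID, dgst digest.Digest, hmacKey []byte) error {
	svc := metadata.NewV2MetadataService(store)
	return svc.TagAndAdd(diffID, hmacKey, metadata.V2Metadata{
		Digest:           dgst,
		SourceRepository: "docker.io/library/alpine", // assumption
	})
}
```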
+ // + Pull(ctx context.Context, ref reference.Named) error +} + +// newPuller returns a Puller interface that will pull from either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 puller will be created. The other parameters are passed +// through to the underlying puller implementation for use during the actual +// pull operation. +func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Puller{ + V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + case registry.APIVersion1: + return &v1Puller{ + v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), + endpoint: endpoint, + config: imagePullConfig, + repoInfo: repoInfo, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Pull initiates a pull operation. image is the repository name to pull, and +// tag may be either empty, or indicate a specific tag to pull. +func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + // makes sure name is not `scratch` + if err := ValidateRepoName(repoInfo.Name); err != nil { + return err + } + + endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + var ( + lastErr error + + // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport + // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. + // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of + // any subsequent ErrNoSupport errors in lastErr. + // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be + // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant + // error is the ones from v2 endpoints not v1. + discardNoSupportErrors bool + + // confirmedV2 is set to true if a pull attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. 
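+		// For example: if a pull from https://myregistry:5000 completes the
+		// TLS handshake and then fails, "myregistry:5000" is recorded here,
+		// and a later http://myregistry:5000 endpoint in the list is skipped
+		// instead of being retried in plaintext.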
+ confirmedTLSRegistries = make(map[string]struct{}) + ) + for _, endpoint := range endpoints { + if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version) + + puller, err := newPuller(endpoint, repoInfo, imagePullConfig) + if err != nil { + lastErr = err + continue + } + if err := puller.Pull(ctx, ref); err != nil { + // Was this pull cancelled? If so, don't try to fall + // back. + fallback := false + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + fallback = true + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + } + } + if fallback { + if _, ok := err.(ErrNoSupport); !ok { + // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. + discardNoSupportErrors = true + // append subsequent errors + lastErr = err + } else if !discardNoSupportErrors { + // Save the ErrNoSupport error, because it's either the first error or all encountered errors + // were also ErrNoSupport errors. + // append subsequent errors + lastErr = err + } + logrus.Infof("Attempting next endpoint for pull after error: %v", err) + continue + } + logrus.Errorf("Not continuing with pull after error: %v", err) + return TranslatePullError(err, ref) + } + + imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) + } + + return TranslatePullError(lastErr, ref) +} + +// writeStatus writes a status message to out. If layersDownloaded is true, the +// status message indicates that a newer image was downloaded. Otherwise, it +// indicates that the image is up to date. requestedTag is the tag the message +// will refer to. +func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { + if layersDownloaded { + progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) + } else { + progress.Message(out, "", "Status: Image is up to date for "+requestedTag) + } +} + +// ValidateRepoName validates the name of a repository. 
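A quick, standalone sketch of what the guard below amounts to in practice, using the vendored reference package (the literal "scratch" stands in for api.NoBaseImageSpecifier):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// "scratch" is the reserved no-base-image name; ValidateRepoName
	// rejects it before any registry endpoint is contacted.
	ref, err := reference.ParseNormalizedNamed("scratch")
	if err != nil {
		panic(err)
	}
	if reference.FamiliarName(ref) == "scratch" {
		fmt.Println("error: 'scratch' is a reserved name")
	} else {
		fmt.Println("ok to pull:", ref.String())
	}
}
```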
+func ValidateRepoName(name reference.Named) error { + if reference.FamiliarName(name) == api.NoBaseImageSpecifier { + return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) + } + return nil +} + +func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { + dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) + if err != nil { + return err + } + + if oldTagID, err := store.Get(dgstRef); err == nil { + if oldTagID != id { + // Updating digests not supported by reference store + logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) + } + return nil + } else if err != refstore.ErrDoesNotExist { + return err + } + + return store.AddDigest(dgstRef, id, true) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go new file mode 100644 index 000000000000..d873d338cde0 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v1.go @@ -0,0 +1,368 @@ +package distribution + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +type v1Puller struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + session *registry.Session +} + +func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + // Allowing fallback, because HTTPS v1 is before HTTP v2 + return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} + } + + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was ReceiveTimeout + registry.NewTransport(tlsConfig), + registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + logrus.Debugf("Fallback from error: %s", err) + return fallbackError{err: err} + } + if err := p.pullRepository(ctx, ref); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") + + return nil +} + +func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { + progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) + + tagged, isTagged := ref.(reference.NamedTagged) + + repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) + if err != nil { + if strings.Contains(err.Error(), "HTTP code: 404") { + if isTagged { + return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) + } + return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) + } + // Unexpected HTTP error + return err + } + + logrus.Debug("Retrieving the tag list") + var tagsList map[string]string + if !isTagged { + tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) + } else { + var tagID string + tagsList = make(map[string]string) + tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) + if err == registry.ErrRepoNotFound { + return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) + } + tagsList[tagged.Tag()] = tagID + } + if err != nil { + logrus.Errorf("unable to get remote tags: %s", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + layersDownloaded := false + for _, imgData := range repoData.ImgList { + if isTagged && imgData.Tag != tagged.Tag() { + continue + } + + err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) + if err != nil { + return err + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + return nil +} + +func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { + if img.Tag == "" { + logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + return nil + } + + localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) + if err != nil { + retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) + logrus.Debug(retErr.Error()) + return retErr + } + + if err := v1.ValidateID(img.ID); err != nil { + return err + } + + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) + success := false + var lastErr error + for _, ep := range p.repoInfo.Index.Mirrors { + ep += "v1/" + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // Don't report errors when pulling from mirrors. + logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + if !success { + for _, ep := range repoData.Endpoints { + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep) + if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { + // It's not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. 
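+				// (Only the most recent failure survives in lastErr; the
+				// earlier ones are still shown to the user through the
+				// progress updates below.)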
+ lastErr = err + progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) + continue + } + success = true + break + } + } + if !success { + err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr) + progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) + return err + } + return nil +} + +func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { + var history []string + history, err = p.session.GetRemoteHistory(v1ID, endpoint) + if err != nil { + return err + } + if len(history) < 1 { + return fmt.Errorf("empty history for image %s", v1ID) + } + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") + + var ( + descriptors []xfer.DownloadDescriptor + newHistory []image.History + imgJSON []byte + imgSize int64 + ) + + // Iterate over layers, in order from bottom-most to top-most. Download + // config for all layers and create descriptors. + for i := len(history) - 1; i >= 0; i-- { + v1LayerID := history[i] + imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) + if err != nil { + return err + } + + // Create a new-style config from the legacy configs + h, err := v1.HistoryFromConfig(imgJSON, false) + if err != nil { + return err + } + newHistory = append(newHistory, h) + + layerDescriptor := &v1LayerDescriptor{ + v1LayerID: v1LayerID, + indexName: p.repoInfo.Index.Name, + endpoint: endpoint, + v1IDService: p.v1IDService, + layersDownloaded: layersDownloaded, + layerSize: imgSize, + session: p.session, + } + + descriptors = append(descriptors, layerDescriptor) + } + + rootFS := image.NewRootFS() + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return err + } + defer release() + + config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) + if err != nil { + return err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return err + } + + if p.config.ReferenceStore != nil { + if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { + return err + } + } + + return nil +} + +func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") + + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) + if err != nil && j == retries { + progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") + return nil, 0, err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + + return imgJSON, imgSize, nil + } + + // not reached + return nil, 0, nil +} + +type v1LayerDescriptor struct { + v1LayerID string + indexName string + endpoint string + v1IDService *metadata.V1IDService + layersDownloaded *bool + layerSize int64 + session *registry.Session + tmpFile *os.File +} + +func (ld *v1LayerDescriptor) Key() string { + return "v1:" + ld.v1LayerID +} + +func (ld *v1LayerDescriptor) ID() string { + return stringid.TruncateID(ld.v1LayerID) +} + +func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { + return ld.v1IDService.Get(ld.v1LayerID, 
ld.indexName) +} + +func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + progress.Update(progressOutput, ld.ID(), "Pulling fs layer") + layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) + if err != nil { + progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + if terr, ok := err.(net.Error); ok && terr.Timeout() { + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + *ld.layersDownloaded = true + + ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") + if err != nil { + layerReader.Close() + return nil, 0, err + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") + defer reader.Close() + + _, err = io.Copy(ld.tmpFile, reader) + if err != nil { + ld.Close() + return nil, 0, err + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) + + ld.tmpFile.Seek(0, 0) + + // hand off the temporary file to the download manager, so it will only + // be closed once + tmpFile := ld.tmpFile + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), ld.layerSize, nil +} + +func (ld *v1LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile = nil + } +} + +func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go new file mode 100644 index 000000000000..08df6b768fea --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2.go @@ -0,0 +1,899 @@ +package distribution + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/manifestlist" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +var ( + errRootFSMismatch = errors.New("layers from manifest don't match image configuration") + errRootFSInvalid = errors.New("invalid rootfs in image configuration") +) + +// ImageConfigPullError is an error pulling the image 
config blob +// (only applies to schema2). +type ImageConfigPullError struct { + Err error +} + +// Error returns the error string for ImageConfigPullError. +func (e ImageConfigPullError) Error() string { + return "error pulling image configuration: " + e.Err.Error() +} + +type v2Puller struct { + V2MetadataService metadata.V2MetadataService + endpoint registry.APIEndpoint + config *ImagePullConfig + repoInfo *registry.RepositoryInfo + repo distribution.Repository + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { + // TODO(tiborvass): was ReceiveTimeout + p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") + if err != nil { + logrus.Warnf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pullV2Repository(ctx, ref); err != nil { + if _, ok := err.(fallbackError); ok { + return err + } + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { + var layersDownloaded bool + if !reference.IsNameOnly(ref) { + layersDownloaded, err = p.pullV2Tag(ctx, ref) + if err != nil { + return err + } + } else { + tags, err := p.repo.Tags(ctx).All(ctx) + if err != nil { + // If this repository doesn't exist on V2, we should + // permit a fallback to V1. + return allowV1Fallback(err) + } + + // The v2 registry knows about this repository, so we will not + // allow fallback to the v1 protocol even if we encounter an + // error later on. + p.confirmedV2 = true + + for _, tag := range tags { + tagRef, err := reference.WithTag(ref, tag) + if err != nil { + return err + } + pulledNew, err := p.pullV2Tag(ctx, tagRef) + if err != nil { + // Since this is the pull-all-tags case, don't + // allow an error pulling a particular tag to + // make the whole pull fall back to v1. + if fallbackErr, ok := err.(fallbackError); ok { + return fallbackErr.err + } + return err + } + // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged + // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
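+			// One tag pulling new content is enough to make writeStatus
+			// report "Downloaded newer image" for the whole repository.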
+ layersDownloaded = layersDownloaded || pulledNew + } + } + + writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) + + return nil +} + +type v2LayerDescriptor struct { + digest digest.Digest + diffID layer.DiffID + repoInfo *registry.RepositoryInfo + repo distribution.Repository + V2MetadataService metadata.V2MetadataService + tmpFile *os.File + verifier digest.Verifier + src distribution.Descriptor +} + +func (ld *v2LayerDescriptor) Key() string { + return "v2:" + ld.digest.String() +} + +func (ld *v2LayerDescriptor) ID() string { + return stringid.TruncateID(ld.digest.String()) +} + +func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { + if ld.diffID != "" { + return ld.diffID, nil + } + return ld.V2MetadataService.GetDiffID(ld.digest) +} + +func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { + logrus.Debugf("pulling blob %q", ld.digest) + + var ( + err error + offset int64 + ) + + if ld.tmpFile == nil { + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else { + offset, err = ld.tmpFile.Seek(0, os.SEEK_END) + if err != nil { + logrus.Debugf("error seeking to end of download file: %v", err) + offset = 0 + + ld.tmpFile.Close() + if err := os.Remove(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + ld.tmpFile, err = createDownloadFile() + if err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } else if offset != 0 { + logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) + } + } + + tmpFile := ld.tmpFile + + layerDownload, err := ld.open(ctx) + if err != nil { + logrus.Errorf("Error initiating layer download: %v", err) + return nil, 0, retryOnError(err) + } + + if offset != 0 { + _, err := layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + } + size, err := layerDownload.Seek(0, os.SEEK_END) + if err != nil { + // Seek failed, perhaps because there was no Content-Length + // header. This shouldn't fail the download, because we can + // still continue without a progress bar. + size = 0 + } else { + if size != 0 && offset > size { + logrus.Debug("Partial download is larger than full blob. Starting over") + offset = 0 + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + } + + // Restore the seek offset either at the beginning of the + // stream, or just after the last byte we have from previous + // attempts. 
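+		// For example, with a 5000-byte blob and 1000 bytes already in
+		// tmpFile: offset=1000, size=5000, so we seek to byte 1000 and the
+		// progress reader below is sized at size-offset = 4000 bytes.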
+ _, err = layerDownload.Seek(offset, os.SEEK_SET) + if err != nil { + return nil, 0, err + } + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") + defer reader.Close() + + if ld.verifier == nil { + ld.verifier = ld.digest.Verifier() + } + + _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) + if err != nil { + if err == transport.ErrWrongCodeForByteRange { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + return nil, 0, err + } + return nil, 0, retryOnError(err) + } + + progress.Update(progressOutput, ld.ID(), "Verifying Checksum") + + if !ld.verifier.Verified() { + err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) + logrus.Error(err) + + // Allow a retry if this digest verification error happened + // after a resumed download. + if offset != 0 { + if err := ld.truncateDownloadFile(); err != nil { + return nil, 0, xfer.DoNotRetry{Err: err} + } + + return nil, 0, err + } + return nil, 0, xfer.DoNotRetry{Err: err} + } + + progress.Update(progressOutput, ld.ID(), "Download complete") + + logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) + + _, err = tmpFile.Seek(0, os.SEEK_SET) + if err != nil { + tmpFile.Close() + if err := os.Remove(tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + ld.tmpFile = nil + ld.verifier = nil + return nil, 0, xfer.DoNotRetry{Err: err} + } + + // hand off the temporary file to the download manager, so it will only + // be closed once + ld.tmpFile = nil + + return ioutils.NewReadCloserWrapper(tmpFile, func() error { + tmpFile.Close() + err := os.RemoveAll(tmpFile.Name()) + if err != nil { + logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) + } + return err + }), size, nil +} + +func (ld *v2LayerDescriptor) Close() { + if ld.tmpFile != nil { + ld.tmpFile.Close() + if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { + logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) + } + } +} + +func (ld *v2LayerDescriptor) truncateDownloadFile() error { + // Need a new hash context since we will be redoing the download + ld.verifier = nil + + if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { + logrus.Errorf("error seeking to beginning of download file: %v", err) + return err + } + + if err := ld.tmpFile.Truncate(0); err != nil { + logrus.Errorf("error truncating download file: %v", err) + return err + } + + return nil +} + +func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { + // Cache mapping from this layer's DiffID to the blobsum + ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) +} + +func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return false, err + } + + var ( + manifest distribution.Manifest + tagOrDigest string // Used for logging/progress only + ) + if digested, isDigested := ref.(reference.Canonical); isDigested { + manifest, err = manSvc.Get(ctx, digested.Digest()) + if err != nil { + return false, err + } + tagOrDigest = digested.Digest().String() + } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { + manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) + if err != nil { + return false, allowV1Fallback(err) + } + tagOrDigest = tagged.Tag() + } 
else { + return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) + } + + if manifest == nil { + return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) + } + + if m, ok := manifest.(*schema2.DeserializedManifest); ok { + var allowedMediatype bool + for _, t := range p.config.Schema2Types { + if m.Manifest.Config.MediaType == t { + allowedMediatype = true + break + } + } + if !allowedMediatype { + configClass := mediaTypeClasses[m.Manifest.Config.MediaType] + if configClass == "" { + configClass = "unknown" + } + return false, fmt.Errorf("Encountered remote %q(%s) when fetching", m.Manifest.Config.MediaType, configClass) + } + } + + // If manSvc.Get succeeded, we can be confident that the registry on + // the other side speaks the v2 protocol. + p.confirmedV2 = true + + logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) + progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) + + var ( + id digest.Digest + manifestDigest digest.Digest + ) + + switch v := manifest.(type) { + case *schema1.SignedManifest: + if p.config.RequireSchema2 { + return false, fmt.Errorf("invalid manifest: not schema2") + } + id, manifestDigest, err = p.pullSchema1(ctx, ref, v) + if err != nil { + return false, err + } + case *schema2.DeserializedManifest: + id, manifestDigest, err = p.pullSchema2(ctx, ref, v) + if err != nil { + return false, err + } + case *manifestlist.DeserializedManifestList: + id, manifestDigest, err = p.pullManifestList(ctx, ref, v) + if err != nil { + return false, err + } + default: + return false, errors.New("unsupported manifest format") + } + + progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) + + if p.config.ReferenceStore != nil { + oldTagID, err := p.config.ReferenceStore.Get(ref) + if err == nil { + if oldTagID == id { + return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) + } + } else if err != refstore.ErrDoesNotExist { + return false, err + } + + if canonical, ok := ref.(reference.Canonical); ok { + if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { + return false, err + } + } else { + if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return false, err + } + if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { + return false, err + } + } + } + return true, nil +} + +func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + var verifiedManifest *schema1.Manifest + verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) + if err != nil { + return "", "", err + } + + rootFS := image.NewRootFS() + + // remove duplicate layers and check parent chain validity + err = fixManifestLayers(verifiedManifest) + if err != nil { + return "", "", err + } + + var descriptors []xfer.DownloadDescriptor + + // Image history converted to the new format + var history []image.History + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. 
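+	// (schema1 stores FSLayers[0] as the top-most layer, so counting i down
+	// from len-1 to 0 visits the base layer first.)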
+ for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { + blobSum := verifiedManifest.FSLayers[i].BlobSum + + var throwAway struct { + ThrowAway bool `json:"throwaway,omitempty"` + } + if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { + return "", "", err + } + + h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) + if err != nil { + return "", "", err + } + history = append(history, h) + + if throwAway.ThrowAway { + continue + } + + layerDescriptor := &v2LayerDescriptor{ + digest: blobSum, + repoInfo: p.repoInfo, + repo: p.repo, + V2MetadataService: p.V2MetadataService, + } + + descriptors = append(descriptors, layerDescriptor) + } + + resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) + if err != nil { + return "", "", err + } + defer release() + + config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) + if err != nil { + return "", "", err + } + + imageID, err := p.config.ImageStore.Put(config) + if err != nil { + return "", "", err + } + + manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) + + return imageID, manifestDigest, nil +} + +func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) { + manifestDigest, err = schema2ManifestDigest(ref, mfst) + if err != nil { + return "", "", err + } + + target := mfst.Target() + if _, err := p.config.ImageStore.Get(target.Digest); err == nil { + // If the image already exists locally, no need to pull + // anything. + return target.Digest, manifestDigest, nil + } + + var descriptors []xfer.DownloadDescriptor + + // Note that the order of this loop is in the direction of bottom-most + // to top-most, so that the downloads slice gets ordered correctly. + for _, d := range mfst.Layers { + layerDescriptor := &v2LayerDescriptor{ + digest: d.Digest, + repo: p.repo, + repoInfo: p.repoInfo, + V2MetadataService: p.V2MetadataService, + src: d, + } + + descriptors = append(descriptors, layerDescriptor) + } + + configChan := make(chan []byte, 1) + configErrChan := make(chan error, 1) + layerErrChan := make(chan error, 1) + downloadsDone := make(chan struct{}) + var cancel func() + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + // Pull the image config + go func() { + configJSON, err := p.pullSchema2Config(ctx, target.Digest) + if err != nil { + configErrChan <- ImageConfigPullError{Err: err} + cancel() + return + } + configChan <- configJSON + }() + + var ( + configJSON []byte // raw serialized image config + downloadedRootFS *image.RootFS // rootFS from registered layers + configRootFS *image.RootFS // rootFS from configuration + release func() // release resources from rootFS download + ) + + // https://github.com/docker/docker/issues/24766 - Err on the side of caution, + // explicitly blocking images intended for linux from the Windows daemon. On + // Windows, we do this before the attempt to download, effectively serialising + // the download slightly slowing it down. We have to do it this way, as + // chances are the download of layers itself would fail due to file names + // which aren't suitable for NTFS. 
At some point in the future, if a similar + // check to block Windows images being pulled on Linux is implemented, it + // may be necessary to perform the same type of serialisation. + if runtime.GOOS == "windows" { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err != nil { + return "", "", err + } + + if configRootFS == nil { + return "", "", errRootFSInvalid + } + + if len(descriptors) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + // Populate diff ids in descriptors to avoid downloading foreign layers + // which have been side loaded + for i := range descriptors { + descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] + } + } + + if p.config.DownloadManager != nil { + go func() { + var ( + err error + rootFS image.RootFS + ) + downloadRootFS := *image.NewRootFS() + rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput) + if err != nil { + // Intentionally do not cancel the config download here + // as the error from config download (if there is one) + // is more interesting than the layer download error + layerErrChan <- err + return + } + + downloadedRootFS = &rootFS + close(downloadsDone) + }() + } else { + // We have nothing to download + close(downloadsDone) + } + + if configJSON == nil { + configJSON, configRootFS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) + if err == nil && configRootFS == nil { + err = errRootFSInvalid + } + if err != nil { + cancel() + select { + case <-downloadsDone: + case <-layerErrChan: + } + return "", "", err + } + } + + select { + case <-downloadsDone: + case err = <-layerErrChan: + return "", "", err + } + + if release != nil { + defer release() + } + + if downloadedRootFS != nil { + // The DiffIDs returned in rootFS MUST match those in the config. + // Otherwise the image config could be referencing layers that aren't + // included in the manifest. + if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { + return "", "", errRootFSMismatch + } + + for i := range downloadedRootFS.DiffIDs { + if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { + return "", "", errRootFSMismatch + } + } + } + + imageID, err := p.config.ImageStore.Put(configJSON) + if err != nil { + return "", "", err + } + + return imageID, manifestDigest, nil +} + +func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, error) { + select { + case configJSON := <-configChan: + rootfs, err := s.RootFSFromConfig(configJSON) + if err != nil { + return nil, nil, err + } + return configJSON, rootfs, nil + case err := <-errChan: + return nil, nil, err + // Don't need a case for ctx.Done in the select because cancellation + // will trigger an error in p.pullSchema2ImageConfig. + } +} + +// pullManifestList handles "manifest lists" which point to various +// platform-specific manifests. 
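For illustration, a self-contained sketch of the platform match performed by pullManifestList below; the two descriptors and their digests are invented for the example:

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/opencontainers/go-digest"
)

func main() {
	// Two hand-built entries standing in for a registry's manifest list.
	linux := manifestlist.ManifestDescriptor{
		Platform: manifestlist.PlatformSpec{OS: "linux", Architecture: "amd64"},
	}
	linux.Digest = digest.FromString("linux/amd64 manifest")
	windows := manifestlist.ManifestDescriptor{
		Platform: manifestlist.PlatformSpec{OS: "windows", Architecture: "amd64"},
	}
	windows.Digest = digest.FromString("windows/amd64 manifest")

	// The same selection rule as pullManifestList: the first entry whose
	// platform matches the local runtime wins.
	for _, d := range []manifestlist.ManifestDescriptor{linux, windows} {
		if d.Platform.OS == runtime.GOOS && d.Platform.Architecture == runtime.GOARCH {
			fmt.Println("would pull manifest", d.Digest)
			return
		}
	}
	fmt.Printf("no matching manifest for %s/%s\n", runtime.GOOS, runtime.GOARCH)
}
```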
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
+	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
+	if err != nil {
+		return "", "", err
+	}
+
+	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for an os/arch match", ref, len(mfstList.Manifests))
+	var manifestDigest digest.Digest
+	for _, manifestDescriptor := range mfstList.Manifests {
+		// TODO(aaronl): The manifest list spec supports optional
+		// "features" and "variant" fields. These are not yet used.
+		// Once they are, their values should be interpreted here.
+		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
+			manifestDigest = manifestDescriptor.Digest
+			logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String())
+			break
+		}
+	}
+
+	if manifestDigest == "" {
+		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH)
+		logrus.Debug(errMsg)
+		return "", "", errors.New(errMsg)
+	}
+
+	manSvc, err := p.repo.Manifests(ctx)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifest, err := manSvc.Get(ctx, manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		id, _, err = p.pullSchema1(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	case *schema2.DeserializedManifest:
+		id, _, err = p.pullSchema2(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	default:
+		return "", "", errors.New("unsupported manifest format")
+	}
+
+	return id, manifestListDigest, err
+}
+
+func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
+	blobs := p.repo.Blobs(ctx)
+	configJSON, err = blobs.Get(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify image config digest
+	verifier := dgst.Verifier()
+	if _, err := verifier.Write(configJSON); err != nil {
+		return nil, err
+	}
+	if !verifier.Verified() {
+		err := fmt.Errorf("image config verification failed for digest %s", dgst)
+		logrus.Error(err)
+		return nil, err
+	}
+
+	return configJSON, nil
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier := digested.Digest().Verifier()
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
+}
+
+// allowV1Fallback checks if the error is a possible reason to fall back to v1
+// (even if confirmedV2 has been set already), and if so, wraps the error in
+// a fallbackError with confirmedV2 set to false.
Otherwise, it returns the +// error unmodified. +func allowV1Fallback(err error) error { + switch v := err.(type) { + case errcode.Errors: + if len(v) != 0 { + if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + } + case errcode.Error: + if shouldV2Fallback(v) { + return fallbackError{ + err: err, + confirmedV2: false, + transportOK: true, + } + } + case *url.Error: + if v.Err == auth.ErrNoBasicAuthCredentials { + return fallbackError{err: err, confirmedV2: false} + } + } + + return err +} + +func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { + // If pull by digest, then verify the manifest digest. NOTE: It is + // important to do this first, before any other content validation. If the + // digest cannot be verified, don't even bother with those other things. + if digested, isCanonical := ref.(reference.Canonical); isCanonical { + verifier := digested.Digest().Verifier() + if _, err := verifier.Write(signedManifest.Canonical); err != nil { + return nil, err + } + if !verifier.Verified() { + err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) + logrus.Error(err) + return nil, err + } + } + m = &signedManifest.Manifest + + if m.SchemaVersion != 1 { + return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) + } + if len(m.FSLayers) != len(m.History) { + return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) + } + if len(m.FSLayers) == 0 { + return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) + } + return m, nil +} + +// fixManifestLayers removes repeated layers from the manifest and checks the +// correctness of the parent chain. +func fixManifestLayers(m *schema1.Manifest) error { + imgs := make([]*image.V1Image, len(m.FSLayers)) + for i := range m.FSLayers { + img := &image.V1Image{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := v1.ValidateID(img.ID); err != nil { + return err + } + } + + if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { + // Windows base layer can point to a base layer parent that is not in manifest. + return errors.New("invalid parent ID in the base layer of the image") + } + + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) + } + } + + return nil +} + +func createDownloadFile() (*os.File, error) { + return ioutil.TempFile("", "GetImageBlob") +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go new file mode 100644 index 000000000000..45a7a0c1501c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package distribution + +import ( + "github.com/docker/distribution" + "github.com/docker/distribution/context" +) + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + return blobs.Open(ctx, ld.digest) +} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go new file mode 100644 index 000000000000..543ecc10eb6f --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go @@ -0,0 +1,57 @@ +// +build windows + +package distribution + +import ( + "net/http" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/registry/client/transport" +) + +var _ distribution.Describable = &v2LayerDescriptor{} + +func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { + if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { + return ld.src + } + return distribution.Descriptor{} +} + +func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { + blobs := ld.repo.Blobs(ctx) + rsc, err := blobs.Open(ctx, ld.digest) + + if len(ld.src.URLs) == 0 { + return rsc, err + } + + // We're done if the registry has this blob. + if err == nil { + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { + return rsc, nil + } + rsc.Close() + } + + // Find the first URL that results in a 200 result code. + for _, url := range ld.src.URLs { + logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) + rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) + + // Seek does an HTTP GET. If it succeeds, the blob really is accessible. + _, err = rsc.Seek(0, os.SEEK_SET) + if err == nil { + break + } + logrus.Debugf("Download for %v failed: %v", ld.digest, err) + rsc.Close() + rsc = nil + } + return rsc, err +} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go new file mode 100644 index 000000000000..395e4d151246 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push.go @@ -0,0 +1,186 @@ +package distribution + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/registry" + "golang.org/x/net/context" +) + +// Pusher is an interface that abstracts pushing for different API versions. +type Pusher interface { + // Push tries to push the image configured at the creation of Pusher. + // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. 
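+	// (As with Puller, that decision is not a literal second return value:
+	// a pusher asks for the next endpoint by returning a fallbackError,
+	// which the Push loop below unwraps.)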
+ // + // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. + Push(ctx context.Context) error +} + +const compressionBufSize = 32768 + +// NewPusher creates a new Pusher interface that will push to either a v1 or v2 +// registry. The endpoint argument contains a Version field that determines +// whether a v1 or v2 pusher will be created. The other parameters are passed +// through to the underlying pusher implementation for use during the actual +// push operation. +func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { + switch endpoint.Version { + case registry.APIVersion2: + return &v2Pusher{ + v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + case registry.APIVersion1: + return &v1Pusher{ + v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), + ref: ref, + endpoint: endpoint, + repoInfo: repoInfo, + config: imagePushConfig, + }, nil + } + return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) +} + +// Push initiates a push operation on ref. +// ref is the specific variant of the image to be pushed. +// If no tag is provided, all tags will be pushed. +func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { + // FIXME: Allow to interrupt current push when new push of same image is done. + + // Resolve the Repository name from fqn to RepositoryInfo + repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) + if err != nil { + return err + } + + endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) + if err != nil { + return err + } + + progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.Name.Name()) + + associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) + if len(associations) == 0 { + return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) + } + + var ( + lastErr error + + // confirmedV2 is set to true if a push attempt managed to + // confirm that it was talking to a v2 registry. This will + // prevent fallback to the v1 protocol. + confirmedV2 bool + + // confirmedTLSRegistries is a map indicating which registries + // are known to be using TLS. There should never be a plaintext + // retry for any of these. + confirmedTLSRegistries = make(map[string]struct{}) + ) + + for _, endpoint := range endpoints { + if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { + continue + } + if confirmedV2 && endpoint.Version == registry.APIVersion1 { + logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) + continue + } + + if endpoint.URL.Scheme != "https" { + if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { + logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) + continue + } + } + + logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) + + pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) + if err != nil { + lastErr = err + continue + } + if err := pusher.Push(ctx); err != nil { + // Was this push cancelled? 
If so, don't try to fall + // back. + select { + case <-ctx.Done(): + default: + if fallbackErr, ok := err.(fallbackError); ok { + confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 + if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { + confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} + } + err = fallbackErr.err + lastErr = err + logrus.Infof("Attempting next endpoint for push after error: %v", err) + continue + } + } + + logrus.Errorf("Not continuing with push after error: %v", err) + return err + } + + imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") + return nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) + } + return lastErr +} + +// compress returns an io.ReadCloser which will supply a compressed version of +// the provided Reader. The caller must close the ReadCloser after reading the +// compressed data. +// +// Note that this function returns a reader instead of taking a writer as an +// argument so that it can be used with httpBlobWriter's ReadFrom method. +// Using httpBlobWriter's Write method would send a PATCH request for every +// Write call. +// +// The second return value is a channel that gets closed when the goroutine +// is finished. This allows the caller to make sure the goroutine finishes +// before it releases any resources connected with the reader that was +// passed in. +func compress(in io.Reader) (io.ReadCloser, chan struct{}) { + compressionDone := make(chan struct{}) + + pipeReader, pipeWriter := io.Pipe() + // Use a bufio.Writer to avoid excessive chunking in HTTP request. + bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) + compressor := gzip.NewWriter(bufWriter) + + go func() { + _, err := io.Copy(compressor, in) + if err == nil { + err = compressor.Close() + } + if err == nil { + err = bufWriter.Flush() + } + if err != nil { + pipeWriter.CloseWithError(err) + } else { + pipeWriter.Close() + } + close(compressionDone) + }() + + return pipeReader, compressionDone +} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go new file mode 100644 index 000000000000..431faaf28c4c --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v1.go @@ -0,0 +1,457 @@ +package distribution + +import ( + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/image" + "github.com/docker/docker/image/v1" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type v1Pusher struct { + v1IDService *metadata.V1IDService + endpoint registry.APIEndpoint + ref reference.Named + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + session *registry.Session +} + +func (p *v1Pusher) Push(ctx context.Context) error { + tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) + if err != nil { + return err + } + // Adds Docker-specific headers as well as user-specified headers (metaHeaders) + tr := transport.NewTransport( + // TODO(tiborvass): was NoTimeout + registry.NewTransport(tlsConfig), + 
registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., + ) + client := registry.HTTPClient(tr) + v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) + if err != nil { + logrus.Debugf("Could not get v1 endpoint: %v", err) + return fallbackError{err: err} + } + p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) + if err != nil { + // TODO(dmcgowan): Check if should fallback + return fallbackError{err: err} + } + if err := p.pushRepository(ctx); err != nil { + // TODO(dmcgowan): Check if should fallback + return err + } + return nil +} + +// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an +// image being pushed to a v1 registry. +type v1Image interface { + Config() []byte + Layer() layer.Layer + V1ID() string +} + +type v1ImageCommon struct { + layer layer.Layer + config []byte + v1ID string +} + +func (common *v1ImageCommon) Config() []byte { + return common.config +} + +func (common *v1ImageCommon) V1ID() string { + return common.v1ID +} + +func (common *v1ImageCommon) Layer() layer.Layer { + return common.layer +} + +// v1TopImage defines a runnable (top layer) image being pushed to a v1 +// registry. +type v1TopImage struct { + v1ImageCommon + imageID image.ID +} + +func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { + v1ID := imageID.Digest().Hex() + parentV1ID := "" + if parent != nil { + parentV1ID = parent.V1ID() + } + + config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) + if err != nil { + return nil, err + } + + return &v1TopImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: config, + layer: l, + }, + imageID: imageID, + }, nil +} + +// v1DependencyImage defines a dependency layer being pushed to a v1 registry. +type v1DependencyImage struct { + v1ImageCommon +} + +func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage { + v1ID := digest.Digest(l.ChainID()).Hex() + + var config string + if parent != nil { + config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) + } else { + config = fmt.Sprintf(`{"id":"%s"}`, v1ID) + } + return &v1DependencyImage{ + v1ImageCommon: v1ImageCommon{ + v1ID: v1ID, + config: []byte(config), + layer: l, + }, + } +} + +// Retrieve the all the images to be uploaded in the correct order +func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { + tagsByImage = make(map[image.ID][]string) + + // Ignore digest references + if _, isCanonical := p.ref.(reference.Canonical); isCanonical { + return + } + + tagged, isTagged := p.ref.(reference.NamedTagged) + if isTagged { + // Push a specific tag + var imgID image.ID + var dgst digest.Digest + dgst, err = p.config.ReferenceStore.Get(p.ref) + if err != nil { + return + } + imgID = image.IDFromDigest(dgst) + + imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) + if err != nil { + return + } + + tagsByImage[imgID] = []string{tagged.Tag()} + + return + } + + imagesSeen := make(map[digest.Digest]struct{}) + dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) + + associations := p.config.ReferenceStore.ReferencesByName(p.ref) + for _, association := range associations { + if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { + // Ignore digest references. 
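+			// (v1 registries are not content-addressable, so a digest
+			// reference has nothing meaningful to push)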
+ continue + } + + imgID := image.IDFromDigest(association.ID) + tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) + + if _, present := imagesSeen[association.ID]; present { + // Skip generating image list for already-seen image + continue + } + imagesSeen[association.ID] = struct{}{} + + imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) + if err != nil { + return nil, nil, nil, err + } + + // append to main image list + imageList = append(imageList, imageListForThisTag...) + } + if len(imageList) == 0 { + return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } + logrus.Debugf("Image list: %v", imageList) + logrus.Debugf("Tags by image: %v", tagsByImage) + + return +} + +func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { + ics, ok := p.config.ImageStore.(*imageConfigStore) + if !ok { + return nil, fmt.Errorf("only image store images supported for v1 push") + } + img, err := ics.Store.Get(imgID) + if err != nil { + return nil, err + } + + topLayerID := img.RootFS.ChainID() + + pl, err := p.config.LayerStore.Get(topLayerID) + *referencedLayers = append(*referencedLayers, pl) + if err != nil { + return nil, fmt.Errorf("failed to get top layer from image: %v", err) + } + + // V1 push is deprecated, only support existing layerstore layers + lsl, ok := pl.(*storeLayer) + if !ok { + return nil, fmt.Errorf("only layer store layers supported for v1 push") + } + l := lsl.Layer + + dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen) + + topImage, err := newV1TopImage(imgID, img, l, parent) + if err != nil { + return nil, err + } + + imageListForThisTag = append(dependencyImages, topImage) + + return +} + +func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) { + if l == nil { + return nil, nil + } + + imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen) + + if dependenciesSeen != nil { + if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { + // This layer is already on the list, we can ignore it + // and all its parents. + return imageListForThisTag, dependencyImage + } + } + + dependencyImage := newV1DependencyImage(l, parent) + imageListForThisTag = append(imageListForThisTag, dependencyImage) + + if dependenciesSeen != nil { + dependenciesSeen[l.ChainID()] = dependencyImage + } + + return imageListForThisTag, dependencyImage +} + +// createImageIndex returns an index of an image's layer IDs and tags. 
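The recursion in generateDependencyImages above unwinds parent-first: the oldest ancestor is appended before its descendants, so the returned slice is already in upload order, and the dependenciesSeen map lets tag chains that share a tail stop at the first layer they have in common. Below is a minimal standalone sketch of that ordering and deduplication, using a toy layer type in place of the vendored layer.Layer interface; createImageIndex, which follows, then flattens the real result into registry ImgData entries.

```go
package main

import "fmt"

// toyLayer stands in for layer.Layer: each layer knows only its parent.
type toyLayer struct {
	id     string
	parent *toyLayer
}

// flatten walks to the root first, so ancestors are appended before
// descendants, and skips any chain tail already present in seen.
func flatten(l *toyLayer, seen map[string]bool) []string {
	if l == nil {
		return nil
	}
	if seen[l.id] {
		// This layer and all of its parents are already listed.
		return nil
	}
	out := append(flatten(l.parent, seen), l.id)
	seen[l.id] = true
	return out
}

func main() {
	base := &toyLayer{id: "base"}
	mid := &toyLayer{id: "mid", parent: base}
	a := &toyLayer{id: "app-a", parent: mid}
	b := &toyLayer{id: "app-b", parent: mid}

	seen := map[string]bool{}
	fmt.Println(flatten(a, seen)) // [base mid app-a]
	fmt.Println(flatten(b, seen)) // [app-b]: base and mid were deduplicated
}
```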
+func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { + var imageIndex []*registry.ImgData + for _, img := range images { + v1ID := img.V1ID() + + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + if tags, hasTags := tags[topImage.imageID]; hasTags { + // If an image has tags you must add an entry in the image index + // for each tag + for _, tag := range tags { + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: tag, + }) + } + continue + } + } + + // If the image does not have a tag it still needs to be sent to the + // registry with an empty tag so that it is associated with the repository + imageIndex = append(imageIndex, ®istry.ImgData{ + ID: v1ID, + Tag: "", + }) + } + return imageIndex +} + +// lookupImageOnEndpoint checks the specified endpoint to see if an image exists +// and if it is absent then it sends the image id to the channel to be pushed. +func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { + defer wg.Done() + for image := range images { + v1ID := image.V1ID() + truncID := stringid.TruncateID(image.Layer().DiffID().String()) + if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { + logrus.Errorf("Error in LookupRemoteImage: %s", err) + imagesToPush <- v1ID + progress.Update(p.config.ProgressOutput, truncID, "Waiting") + } else { + progress.Update(p.config.ProgressOutput, truncID, "Already exists") + } + } +} + +func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { + workerCount := len(imageList) + // start a maximum of 5 workers to check if images exist on the specified endpoint. + if workerCount > 5 { + workerCount = 5 + } + var ( + wg = &sync.WaitGroup{} + imageData = make(chan v1Image, workerCount*2) + imagesToPush = make(chan string, workerCount*2) + pushes = make(chan map[string]struct{}, 1) + ) + for i := 0; i < workerCount; i++ { + wg.Add(1) + go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) + } + // start a go routine that consumes the images to push + go func() { + shouldPush := make(map[string]struct{}) + for id := range imagesToPush { + shouldPush[id] = struct{}{} + } + pushes <- shouldPush + }() + for _, v1Image := range imageList { + imageData <- v1Image + } + // close the channel to notify the workers that there will be no more images to check. + close(imageData) + wg.Wait() + close(imagesToPush) + // wait for all the images that require pushes to be collected into a consumable map. + shouldPush := <-pushes + // finish by pushing any images and tags to the endpoint. The order that the images are pushed + // is very important that is why we are still iterating over the ordered list of imageIDs. + for _, img := range imageList { + v1ID := img.V1ID() + if _, push := shouldPush[v1ID]; push { + if _, err := p.pushImage(ctx, img, endpoint); err != nil { + // FIXME: Continue on error? 
+ return err + } + } + if topImage, isTopImage := img.(*v1TopImage); isTopImage { + for _, tag := range tags[topImage.imageID] { + progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag) + if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil { + return err + } + } + } + } + return nil +} + +// pushRepository pushes layers that do not already exist on the registry. +func (p *v1Pusher) pushRepository(ctx context.Context) error { + imgList, tags, referencedLayers, err := p.getImageList() + defer func() { + for _, l := range referencedLayers { + l.Release() + } + }() + if err != nil { + return err + } + + imageIndex := createImageIndex(imgList, tags) + for _, data := range imageIndex { + logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) + } + + // Register all the images in a repository with the registry + // If an image is not in this list it will not be associated with the repository + repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil) + if err != nil { + return err + } + // push the repository to each of the endpoints only if it does not exist. + for _, endpoint := range repoData.Endpoints { + if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { + return err + } + } + _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints) + return err +} + +func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { + l := v1Image.Layer() + v1ID := v1Image.V1ID() + truncID := stringid.TruncateID(l.DiffID().String()) + + jsonRaw := v1Image.Config() + progress.Update(p.config.ProgressOutput, truncID, "Pushing") + + // General rule is to use ID for graph accesses and compatibilityID for + // calls to session.registry() + imgData := ®istry.ImgData{ + ID: v1ID, + } + + // Send the json + if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { + if err == registry.ErrAlreadyExists { + progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") + return "", nil + } + return "", err + } + + arch, err := l.TarStream() + if err != nil { + return "", err + } + defer arch.Close() + + // don't care if this fails; best effort + size, _ := l.DiffSize() + + // Send the layer + logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") + defer reader.Close() + + checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) + if err != nil { + return "", err + } + imgData.Checksum = checksum + imgData.ChecksumPayload = checksumPayload + // Send the checksum + if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { + return "", err + } + + if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { + logrus.Warnf("Could not set v1 ID mapping: %v", err) + } + + progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") + return imgData.Checksum, nil +} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go new file mode 100644 index 000000000000..5a6673780c3d --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -0,0 +1,691 @@ +package 
distribution + +import ( + "errors" + "fmt" + "io" + "runtime" + "sort" + "strings" + "sync" + + "golang.org/x/net/context" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + apitypes "github.com/docker/docker/api/types" + "github.com/docker/docker/distribution/metadata" + "github.com/docker/docker/distribution/xfer" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/registry" + "github.com/opencontainers/go-digest" +) + +const ( + smallLayerMaximumSize = 100 * (1 << 10) // 100KB + middleLayerMaximumSize = 10 * (1 << 20) // 10MB +) + +type v2Pusher struct { + v2MetadataService metadata.V2MetadataService + ref reference.Named + endpoint registry.APIEndpoint + repoInfo *registry.RepositoryInfo + config *ImagePushConfig + repo distribution.Repository + + // pushState is state built by the Upload functions. + pushState pushState +} + +type pushState struct { + sync.Mutex + // remoteLayers is the set of layers known to exist on the remote side. + // This avoids redundant queries when pushing multiple tags that + // involve the same layers. It is also used to fill in digest and size + // information when building the manifest. + remoteLayers map[layer.DiffID]distribution.Descriptor + // confirmedV2 is set to true if we confirm we're talking to a v2 + // registry. This is used to limit fallbacks to the v1 protocol. + confirmedV2 bool +} + +func (p *v2Pusher) Push(ctx context.Context) (err error) { + p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) + + p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + if err != nil { + logrus.Debugf("Error getting v2 registry: %v", err) + return err + } + + if err = p.pushV2Repository(ctx); err != nil { + if continueOnError(err) { + return fallbackError{ + err: err, + confirmedV2: p.pushState.confirmedV2, + transportOK: true, + } + } + } + return err +} + +func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { + if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { + imageID, err := p.config.ReferenceStore.Get(p.ref) + if err != nil { + return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref)) + } + + return p.pushV2Tag(ctx, namedTagged, imageID) + } + + if !reference.IsNameOnly(p.ref) { + return errors.New("cannot push a digest reference") + } + + // Pull all tags + pushed := 0 + for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { + if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { + pushed++ + if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { + return err + } + } + } + + if pushed == 0 { + return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name)) + } + + return nil +} + +func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { + logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) + + imgConfig, err := p.config.ImageStore.Get(id) + if err != nil { + return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) + } + + rootfs, err 
:= p.config.ImageStore.RootFSFromConfig(imgConfig) + if err != nil { + return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) + } + + l, err := p.config.LayerStore.Get(rootfs.ChainID()) + if err != nil { + return fmt.Errorf("failed to get top layer from image: %v", err) + } + defer l.Release() + + hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) + if err != nil { + return fmt.Errorf("failed to compute hmac key of auth config: %v", err) + } + + var descriptors []xfer.UploadDescriptor + + descriptorTemplate := v2PushDescriptor{ + v2MetadataService: p.v2MetadataService, + hmacKey: hmacKey, + repoInfo: p.repoInfo.Name, + ref: p.ref, + endpoint: p.endpoint, + repo: p.repo, + pushState: &p.pushState, + } + + // Loop bounds condition is to avoid pushing the base layer on Windows. + for i := 0; i < len(rootfs.DiffIDs); i++ { + descriptor := descriptorTemplate + descriptor.layer = l + descriptor.checkedDigests = make(map[digest.Digest]struct{}) + descriptors = append(descriptors, &descriptor) + + l = l.Parent() + } + + if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { + return err + } + + // Try schema2 first + builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) + manifest, err := manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + manSvc, err := p.repo.Manifests(ctx) + if err != nil { + return err + } + + putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { + logrus.Warnf("failed to upload schema2 manifest: %v", err) + return err + } + + logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) + + manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) + if err != nil { + return err + } + builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) + manifest, err = manifestFromBuilder(ctx, builder, descriptors) + if err != nil { + return err + } + + if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { + return err + } + } + + var canonicalManifest []byte + + switch v := manifest.(type) { + case *schema1.SignedManifest: + canonicalManifest = v.Canonical + case *schema2.DeserializedManifest: + _, canonicalManifest, err = v.Payload() + if err != nil { + return err + } + } + + manifestDigest := digest.FromBytes(canonicalManifest) + progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) + + if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { + return err + } + + // Signal digest to the trust client so it can sign the + // push, if appropriate. + progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) + + return nil +} + +func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { + // descriptors is in reverse order; iterate backwards to get references + // appended in the right order. 
+ for i := len(descriptors) - 1; i >= 0; i-- { + if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { + return nil, err + } + } + + return builder.Build(ctx) +} + +type v2PushDescriptor struct { + layer PushLayer + v2MetadataService metadata.V2MetadataService + hmacKey []byte + repoInfo reference.Named + ref reference.Named + endpoint registry.APIEndpoint + repo distribution.Repository + pushState *pushState + remoteDescriptor distribution.Descriptor + // a set of digests whose presence has been checked in a target repository + checkedDigests map[digest.Digest]struct{} +} + +func (pd *v2PushDescriptor) Key() string { + return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() +} + +func (pd *v2PushDescriptor) ID() string { + return stringid.TruncateID(pd.layer.DiffID().String()) +} + +func (pd *v2PushDescriptor) DiffID() layer.DiffID { + return pd.layer.DiffID() +} + +func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { + // Skip foreign layers unless this registry allows nondistributable artifacts. + if !pd.endpoint.AllowNondistributableArtifacts { + if fs, ok := pd.layer.(distribution.Describable); ok { + if d := fs.Descriptor(); len(d.URLs) > 0 { + progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") + return d, nil + } + } + } + + diffID := pd.DiffID() + + pd.pushState.Lock() + if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { + // it is already known that the push is not needed and + // therefore doing a stat is unnecessary + pd.pushState.Unlock() + progress.Update(progressOutput, pd.ID(), "Layer already exists") + return descriptor, nil + } + pd.pushState.Unlock() + + maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) + + // Do we have any metadata associated with this layer's DiffID? + v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) + if err == nil { + // check for blob existence in the target repository + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + // if digest was empty or not saved, or if blob does not exist on the remote repository, + // then push the blob. 
+ bs := pd.repo.Blobs(ctx) + + var layerUpload distribution.BlobWriter + + // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload + candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + for _, mountCandidate := range candidates { + logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) + createOpts := []distribution.BlobCreateOption{} + + if len(mountCandidate.SourceRepository) > 0 { + namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) + if err != nil { + logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err) + pd.v2MetadataService.Remove(mountCandidate) + continue + } + + // Candidates are always under same domain, create remote reference + // with only path to set mount from with + remoteRef, err := reference.WithName(reference.Path(namedRef)) + if err != nil { + logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err) + continue + } + + canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) + if err != nil { + logrus.Errorf("failed to make canonical reference: %v", err) + continue + } + + createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) + } + + // send the layer + lu, err := bs.Create(ctx, createOpts...) + switch err := err.(type) { + case nil: + // noop + case distribution.ErrBlobMounted: + progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) + + err.Descriptor.MediaType = schema2.MediaTypeLayer + + pd.pushState.Lock() + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = err.Descriptor + pd.pushState.Unlock() + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: err.Descriptor.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + return err.Descriptor, nil + default: + logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) + } + + if len(mountCandidate.SourceRepository) > 0 && + (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || + len(mountCandidate.HMAC) == 0) { + cause := "blob mount failure" + if err != nil { + cause = fmt.Sprintf("an error: %v", err.Error()) + } + logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) + pd.v2MetadataService.Remove(mountCandidate) + } + + if lu != nil { + // cancel previous upload + cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) + layerUpload = lu + } + } + + if maxExistenceChecks-len(pd.checkedDigests) > 0 { + // do additional layer existence checks with other known digests if any + descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) + if exists || err != nil { + return descriptor, err + } + } + + logrus.Debugf("Pushing layer: %s", diffID) + if layerUpload == nil { + layerUpload, err = bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + } + defer layerUpload.Close() + + // upload the blob + desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, 
layerUpload) + if err != nil { + return desc, err + } + + return desc, nil +} + +func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { + pd.remoteDescriptor = descriptor +} + +func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { + return pd.remoteDescriptor +} + +func (pd *v2PushDescriptor) uploadUsingSession( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + layerUpload distribution.BlobWriter, +) (distribution.Descriptor, error) { + var reader io.ReadCloser + + contentReader, err := pd.layer.Open() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + size, _ := pd.layer.Size() + + reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") + + switch m := pd.layer.MediaType(); m { + case schema2.MediaTypeUncompressedLayer: + compressedReader, compressionDone := compress(reader) + defer func(closer io.Closer) { + closer.Close() + <-compressionDone + }(reader) + reader = compressedReader + case schema2.MediaTypeLayer: + default: + reader.Close() + return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) + } + + digester := digest.Canonical.Digester() + tee := io.TeeReader(reader, digester.Hash()) + + nn, err := layerUpload.ReadFrom(tee) + reader.Close() + if err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + pushDigest := digester.Digest() + if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { + return distribution.Descriptor{}, retryOnError(err) + } + + logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) + progress.Update(progressOutput, pd.ID(), "Pushed") + + // Cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: pushDigest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} + } + + desc := distribution.Descriptor{ + Digest: pushDigest, + MediaType: schema2.MediaTypeLayer, + Size: nn, + } + + pd.pushState.Lock() + // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. + pd.pushState.confirmedV2 = true + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + + return desc, nil +} + +// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" +// slice. If it finds one that the registry knows about, it returns the known digest and "true". If +// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository +// (not just the target one). 
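A note on the upload plumbing above before the existence-check helper that follows: uncompressed layers are gzipped through an io.Pipe so the blob writer can consume them with ReadFrom, and the stream is teed through a digester so the recorded digest covers exactly the bytes that were sent. Here is a self-contained sketch of that pattern, substituting the standard library's sha256 for the go-digest package the vendored code uses, and omitting the bufio layer:

```go
package main

import (
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// compressPipe mirrors the compress helper earlier in this file: gzip
// runs in a goroutine and the caller reads the compressed stream from
// a pipe. The returned channel closes when compression has finished.
func compressPipe(in io.Reader) (io.ReadCloser, chan struct{}) {
	done := make(chan struct{})
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, in)
		if err == nil {
			err = gz.Close()
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close
		close(done)
	}()
	return pr, done
}

func main() {
	layerData := strings.NewReader("pretend this is a layer tar stream")

	compressed, done := compressPipe(layerData)
	defer func() {
		compressed.Close()
		<-done // don't release resources before the goroutine exits
	}()

	// Tee the upload stream through a digester, as uploadUsingSession
	// does, so the digest covers exactly the bytes that were sent.
	digester := sha256.New()
	n, err := io.Copy(ioutil.Discard, io.TeeReader(compressed, digester))
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, sha256:%x\n", n, digester.Sum(nil))
}
```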
+func (pd *v2PushDescriptor) layerAlreadyExists( + ctx context.Context, + progressOutput progress.Output, + diffID layer.DiffID, + checkOtherRepositories bool, + maxExistenceCheckAttempts int, + v2Metadata []metadata.V2Metadata, +) (desc distribution.Descriptor, exists bool, err error) { + // filter the metadata + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + // sort the candidates by similarity + sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) + + digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) + // an array of unique blob digests ordered from the best mount candidates to worst + layerDigests := []digest.Digest{} + for i := 0; i < len(candidates); i++ { + if len(layerDigests) >= maxExistenceCheckAttempts { + break + } + meta := &candidates[i] + if _, exists := digestToMetadata[meta.Digest]; exists { + // keep reference just to the first mapping (the best mount candidate) + continue + } + if _, exists := pd.checkedDigests[meta.Digest]; exists { + // existence of this digest has already been tested + continue + } + digestToMetadata[meta.Digest] = meta + layerDigests = append(layerDigests, meta.Digest) + } + +attempts: + for _, dgst := range layerDigests { + meta := digestToMetadata[dgst] + logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) + pd.checkedDigests[meta.Digest] = struct{}{} + switch err { + case nil: + if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { + // cache mapping from this layer's DiffID to the blobsum + if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ + Digest: desc.Digest, + SourceRepository: pd.repoInfo.Name(), + }); err != nil { + return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} + } + } + desc.MediaType = schema2.MediaTypeLayer + exists = true + break attempts + case distribution.ErrBlobUnknown: + if meta.SourceRepository == pd.repoInfo.Name() { + // remove the mapping to the target repository + pd.v2MetadataService.Remove(*meta) + } + default: + logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) + } + } + + if exists { + progress.Update(progressOutput, pd.ID(), "Layer already exists") + pd.pushState.Lock() + pd.pushState.remoteLayers[diffID] = desc + pd.pushState.Unlock() + } + + return desc, exists, nil +} + +// getMaxMountAndExistenceCheckAttempts returns a maximum number of cross repository mount attempts from +// source repositories of target registry, maximum number of layer existence checks performed on the target +// repository and whether the check shall be done also with digests mapped to different repositories. The +// decision is based on layer size. The smaller the layer, the fewer attempts shall be made because the cost +// of upload does not outweigh a latency. 
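The size-based policy itself follows below. First, one easy-to-miss detail in the mount loop above is the reference surgery: cross-repository mounts only work within a single registry, so a candidate's source repository is cut down to a path-only name and then pinned to the blob digest before being handed to client.WithMountFrom. A sketch of just that manipulation, with an illustrative repository name and digest:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// A mount candidate as it appears in the v2 metadata service: a
	// fully qualified source repository plus the blob digest. Both
	// values here are hypothetical.
	source := "registry.example.com/team/base-image"
	dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	named, err := reference.ParseNormalizedNamed(source)
	if err != nil {
		panic(err)
	}

	// Mounts are same-registry only, so keep just the path component...
	remote, err := reference.WithName(reference.Path(named))
	if err != nil {
		panic(err)
	}

	// ...and pin it to the digest being mounted.
	canonical, err := reference.WithDigest(reference.TrimNamed(remote), dgst)
	if err != nil {
		panic(err)
	}

	// canonical is the shape of reference passed to client.WithMountFrom.
	fmt.Println(canonical.String()) // team/base-image@sha256:e3b0...
}
```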
+func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { + size, err := layer.Size() + switch { + // big blob + case size > middleLayerMaximumSize: + // 1st attempt to mount the blob few times + // 2nd few existence checks with digests associated to any repository + // then fallback to upload + return 4, 3, true + + // middle sized blobs; if we could not get the size, assume we deal with middle sized blob + case size > smallLayerMaximumSize, err != nil: + // 1st attempt to mount blobs of average size few times + // 2nd try at most 1 existence check if there's an existing mapping to the target repository + // then fallback to upload + return 3, 1, false + + // small blobs, do a minimum number of checks + default: + return 1, 1, false + } +} + +// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The +// array is sorted from youngest to oldest. If requireRegistryMatch is true, the resulting array will contain +// only metadata entries having registry part of SourceRepository matching the part of repoInfo. +func getRepositoryMountCandidates( + repoInfo reference.Named, + hmacKey []byte, + max int, + v2Metadata []metadata.V2Metadata, +) []metadata.V2Metadata { + candidates := []metadata.V2Metadata{} + for _, meta := range v2Metadata { + sourceRepo, err := reference.ParseNamed(meta.SourceRepository) + if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) { + continue + } + // target repository is not a viable candidate + if meta.SourceRepository == repoInfo.Name() { + continue + } + candidates = append(candidates, meta) + } + + sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) + if max >= 0 && len(candidates) > max { + // select the youngest metadata + candidates = candidates[:max] + } + + return candidates +} + +// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The +// candidate "a" is preferred over "b": +// +// 1. if it was hashed using the same AuthConfig as the one used to authenticate to target repository and the +// "b" was not +// 2. 
if a number of its repository path components exactly matching path components of target repository is higher +type byLikeness struct { + arr []metadata.V2Metadata + hmacKey []byte + pathComponents []string +} + +func (bla byLikeness) Less(i, j int) bool { + aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) + bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) + if aMacMatch != bMacMatch { + return aMacMatch + } + aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) + bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) + return aMatch > bMatch +} +func (bla byLikeness) Swap(i, j int) { + bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] +} +func (bla byLikeness) Len() int { return len(bla.arr) } + +func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { + // reverse the metadata array to shift the newest entries to the beginning + for i := 0; i < len(marr)/2; i++ { + marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] + } + // keep equal entries ordered from the youngest to the oldest + sort.Stable(byLikeness{ + arr: marr, + hmacKey: hmacKey, + pathComponents: getPathComponents(repoInfo.Name()), + }) +} + +// numOfMatchingPathComponents returns a number of path components in "pth" that exactly match "matchComponents". +func numOfMatchingPathComponents(pth string, matchComponents []string) int { + pthComponents := getPathComponents(pth) + i := 0 + for ; i < len(pthComponents) && i < len(matchComponents); i++ { + if matchComponents[i] != pthComponents[i] { + return i + } + } + return i +} + +func getPathComponents(path string) []string { + return strings.Split(path, "/") +} + +func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { + if layerUpload != nil { + logrus.Debugf("cancelling upload of blob %s", dgst) + err := layerUpload.Cancel(ctx) + if err != nil { + logrus.Warnf("failed to cancel upload: %v", err) + } + } +} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go new file mode 100644 index 000000000000..bce270a5ed65 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/registry.go @@ -0,0 +1,156 @@ +package distribution + +import ( + "fmt" + "net" + "net/http" + "time" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema2" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/registry" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// ImageTypes represents the schema2 config types for images +var ImageTypes = []string{ + schema2.MediaTypeImageConfig, + // Handle unexpected values from https://github.com/docker/distribution/issues/1621 + // (see also https://github.com/docker/docker/issues/22378, + // https://github.com/docker/docker/issues/30083) + "application/octet-stream", + "application/json", + "text/html", + // Treat defaulted values as images, newer types cannot be implied + "", +} + +// PluginTypes represents the schema2 config types for plugins +var PluginTypes = []string{ + schema2.MediaTypePluginConfig, +} + +var mediaTypeClasses map[string]string + 
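The init hook just below fills mediaTypeClasses from these two lists. The code that consults the table lives outside this hunk, so what follows is only a hedged sketch of what a consumer-side lookup presumably looks like; the error text and helper name are illustrative, not the daemon's actual API:

```go
package main

import "fmt"

// Illustrative copies of the class lists; the real ones are the
// ImageTypes and PluginTypes variables declared above.
var imageTypes = []string{
	"application/vnd.docker.container.image.v1+json",
	"application/octet-stream",
	"", // defaulted values are treated as images
}

var pluginTypes = []string{
	"application/vnd.docker.plugin.v1+json",
}

var mediaTypeClasses = map[string]string{}

func init() {
	for _, t := range imageTypes {
		mediaTypeClasses[t] = "image"
	}
	for _, t := range pluginTypes {
		mediaTypeClasses[t] = "plugin"
	}
}

// classOf is a hypothetical consumer: map a config media type to its
// class, rejecting anything the table does not know about.
func classOf(mediaType string) (string, error) {
	class, ok := mediaTypeClasses[mediaType]
	if !ok {
		return "", fmt.Errorf("unsupported config media type %q", mediaType)
	}
	return class, nil
}

func main() {
	for _, mt := range []string{
		"application/vnd.docker.container.image.v1+json",
		"application/vnd.docker.plugin.v1+json",
		"video/mp4",
	} {
		class, err := classOf(mt)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s => %s\n", mt, class)
	}
}
```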
+func init() {
+ // initialize media type classes with all known types for
+ // plugin
+ mediaTypeClasses = map[string]string{}
+ for _, t := range ImageTypes {
+ mediaTypeClasses[t] = "image"
+ }
+ for _, t := range PluginTypes {
+ mediaTypeClasses[t] = "plugin"
+ }
+}
+
+// NewV2Repository returns a repository (v2 only). It creates an HTTP transport
+// providing timeout settings and authentication support, and also verifies the
+// remote API version.
+func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) {
+ repoName := repoInfo.Name.Name()
+ // If endpoint does not support CanonicalName, use the RemoteName instead
+ if endpoint.TrimHostname {
+ repoName = reference.Path(repoInfo.Name)
+ }
+
+ direct := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+
+ // TODO(dmcgowan): Call close idle connections when complete, use keep alive
+ base := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: direct.Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: endpoint.TLSConfig,
+ // TODO(dmcgowan): Call close idle connections when complete and use keep alive
+ DisableKeepAlives: true,
+ }
+
+ proxyDialer, err := sockets.DialerFromEnvironment(direct)
+ if err == nil {
+ base.Dial = proxyDialer.Dial
+ }
+
+ modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders)
+ authTransport := transport.NewTransport(base, modifiers...)
+
+ challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport)
+ if err != nil {
+ transportOK := false
+ if responseErr, ok := err.(registry.PingResponseError); ok {
+ transportOK = true
+ err = responseErr.Err
+ }
+ return nil, foundVersion, fallbackError{
+ err: err,
+ confirmedV2: foundVersion,
+ transportOK: transportOK,
+ }
+ }
+
+ if authConfig.RegistryToken != "" {
+ passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
+ modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
+ } else {
+ scope := auth.RepositoryScope{
+ Repository: repoName,
+ Actions: actions,
+ Class: repoInfo.Class,
+ }
+
+ creds := registry.NewStaticCredentialStore(authConfig)
+ tokenHandlerOptions := auth.TokenHandlerOptions{
+ Transport: authTransport,
+ Credentials: creds,
+ Scopes: []auth.Scope{scope},
+ ClientID: registry.AuthClientID,
+ }
+ tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
+ basicHandler := auth.NewBasicHandler(creds)
+ modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+ }
+ tr := transport.NewTransport(base, modifiers...)
+ + repoNameRef, err := reference.WithName(repoName) + if err != nil { + return nil, foundVersion, fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + + repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) + if err != nil { + err = fallbackError{ + err: err, + confirmedV2: foundVersion, + transportOK: true, + } + } + return +} + +type existingTokenHandler struct { + token string +} + +func (th *existingTokenHandler) Scheme() string { + return "bearer" +} + +func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) + return nil +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go new file mode 100644 index 000000000000..8bd48646d440 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/download.go @@ -0,0 +1,463 @@ +package xfer + +import ( + "errors" + "fmt" + "io" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxDownloadAttempts = 5 + +// LayerDownloadManager figures out which layers need to be downloaded, then +// registers and downloads those, taking into account dependencies between +// layers. +type LayerDownloadManager struct { + layerStore layer.Store + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent downloads for each pull +func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { + ldm.tm.SetConcurrency(concurrency) +} + +// NewLayerDownloadManager returns a new LayerDownloadManager. +func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { + manager := LayerDownloadManager{ + layerStore: layerStore, + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type downloadTransfer struct { + Transfer + + layerStore layer.Store + layer layer.Layer + err error +} + +// result returns the layer resulting from the download, if the download +// and registration were successful. +func (d *downloadTransfer) result() (layer.Layer, error) { + return d.layer, d.err +} + +// A DownloadDescriptor references a layer that may need to be downloaded. +type DownloadDescriptor interface { + // Key returns the key used to deduplicate downloads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer, or an error + // if it is unknown (for example, if it has not been downloaded + // before). + DiffID() (layer.DiffID, error) + // Download is called to perform the download. + Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) + // Close is called when the download manager is finished with this + // descriptor and will not call Download again or read from the reader + // that Download returned. + Close() +} + +// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an +// additional Registered method which gets called after a downloaded layer is +// registered. 
This allows the user of the download manager to know the DiffID
+// of each registered layer. This method is called if a cast to
+// DownloadDescriptorWithRegistered is successful.
+type DownloadDescriptorWithRegistered interface {
+ DownloadDescriptor
+ Registered(diffID layer.DiffID)
+}
+
+// Download is a blocking function which ensures the requested layers are
+// present in the layer store. It uses the string returned by the Key method to
+// deduplicate downloads. If a given layer is not already known to be present in
+// the layer store, and the key is not used by an in-progress download, the
+// Download method is called to get the layer tar data. Layers are then
+// registered in the appropriate order. The caller must call the returned
+// release function once it is done with the returned RootFS object.
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+ var (
+ topLayer layer.Layer
+ topDownload *downloadTransfer
+ watcher *Watcher
+ missingLayer bool
+ transferKey = ""
+ downloadsByKey = make(map[string]*downloadTransfer)
+ )
+
+ rootFS := initialRootFS
+ for _, descriptor := range layers {
+ key := descriptor.Key()
+ transferKey += key
+
+ if !missingLayer {
+ missingLayer = true
+ diffID, err := descriptor.DiffID()
+ if err == nil {
+ getRootFS := rootFS
+ getRootFS.Append(diffID)
+ l, err := ldm.layerStore.Get(getRootFS.ChainID())
+ if err == nil {
+ // Layer already exists.
+ logrus.Debugf("Layer already exists: %s", descriptor.ID())
+ progress.Update(progressOutput, descriptor.ID(), "Already exists")
+ if topLayer != nil {
+ layer.ReleaseAndLog(ldm.layerStore, topLayer)
+ }
+ topLayer = l
+ missingLayer = false
+ rootFS.Append(diffID)
+ // Register this repository as a source of this layer.
+ withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
+ if hasRegistered {
+ withRegistered.Registered(diffID)
+ }
+ continue
+ }
+ }
+ }
+
+ // Does this layer have the same data as a previous layer in
+ // the stack? If so, avoid downloading it more than once.
+ var topDownloadUncasted Transfer
+ if existingDownload, ok := downloadsByKey[key]; ok {
+ xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
+ defer topDownload.Transfer.Release(watcher)
+ topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
+ topDownload = topDownloadUncasted.(*downloadTransfer)
+ continue
+ }
+
+ // Layer is not known to exist - download and register it.
+ progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer")
+
+ var xferFunc DoFunc
+ if topDownload != nil {
+ xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload)
+ defer topDownload.Transfer.Release(watcher)
+ } else {
+ xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil)
+ }
+ topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
+ topDownload = topDownloadUncasted.(*downloadTransfer)
+ downloadsByKey[key] = topDownload
+ }
+
+ if topDownload == nil {
+ return rootFS, func() {
+ if topLayer != nil {
+ layer.ReleaseAndLog(ldm.layerStore, topLayer)
+ }
+ }, nil
+ }
+
+ // Won't be using the list built up so far - will generate it
+ // from downloaded layers instead.
+ rootFS.DiffIDs = []layer.DiffID{} + + defer func() { + if topLayer != nil { + layer.ReleaseAndLog(ldm.layerStore, topLayer) + } + }() + + select { + case <-ctx.Done(): + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, ctx.Err() + case <-topDownload.Done(): + break + } + + l, err := topDownload.result() + if err != nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, err + } + + // Must do this exactly len(layers) times, so we don't include the + // base layer on Windows. + for range layers { + if l == nil { + topDownload.Transfer.Release(watcher) + return rootFS, func() {}, errors.New("internal error: too few parent layers") + } + rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) + l = l.Parent() + } + return rootFS, func() { topDownload.Transfer.Release(watcher) }, err +} + +// makeDownloadFunc returns a function that performs the layer download and +// registration. If parentDownload is non-nil, it waits for that download to +// complete before the registration step, and registers the downloaded data +// on top of parentDownload's resulting layer. Otherwise, it registers the +// layer on top of the ChainID given by parentLayer. +func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + if parentDownload != nil { + // Did the parent download already fail or get + // cancelled? + select { + case <-parentDownload.Done(): + _, err := parentDownload.result() + if err != nil { + d.err = err + return + } + default: + } + } + + var ( + downloadReader io.ReadCloser + size int64 + err error + retries int + ) + + defer descriptor.Close() + + for { + downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) + if err == nil { + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. 
+ select { + case <-d.Transfer.Context().Done(): + d.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { + logrus.Errorf("Download failed: %v", err) + d.err = err + return + } + + logrus.Errorf("Download failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(ldm.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-d.Transfer.Context().Done(): + ticker.Stop() + d.err = errors.New("download cancelled during retry delay") + return + } + + } + } + + close(inactive) + + if parentDownload != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + downloadReader.Close() + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + downloadReader.Close() + return + } + parentLayer = l.ChainID() + } + + reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") + defer reader.Close() + + inflatedLayerData, err := archive.DecompressStream(reader) + if err != nil { + d.err = fmt.Errorf("could not get decompression stream: %v", err) + return + } + + var src distribution.Descriptor + if fs, ok := descriptor.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) + } + if err != nil { + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + default: + d.err = fmt.Errorf("failed to register layer: %v", err) + } + return + } + + progress.Update(progressOutput, descriptor.ID(), "Pull complete") + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} + +// makeDownloadFuncFromDownload returns a function that performs the layer +// registration when the layer data is coming from an existing download. It +// waits for sourceDownload and parentDownload to complete, and then +// reregisters the data from sourceDownload's top layer on top of +// parentDownload. This function does not log progress output because it would +// interfere with the progress reporting for sourceDownload, which has the same +// Key. 
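Before the re-registration variant that follows, a note on the retry countdown in makeDownloadFunc above: rather than one long sleep, the loop ticks once per second so a cancelled context can interrupt the backoff almost immediately. A compressed, runnable sketch of that loop, with the tick length shortened so the demo finishes quickly:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryDelay mirrors the countdown in makeDownloadFunc: wait "delay"
// ticks, but abort promptly if ctx is cancelled mid-wait.
func retryDelay(ctx context.Context, delay int, tick time.Duration) error {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()
	for delay > 0 {
		// The same pluralization trick the vendored code uses.
		fmt.Printf("Retrying in %d second%s\n", delay, map[bool]string{true: "s"}[delay != 1])
		select {
		case <-ticker.C:
			delay--
		case <-ctx.Done():
			return errors.New("download cancelled during retry delay")
		}
	}
	return nil
}

func main() {
	// Attempt 2 of a hypothetical download: back off for 2*5 ticks,
	// with a deadline that fires partway through the countdown.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond)
	defer cancel()

	if err := retryDelay(ctx, 2*5, 10*time.Millisecond); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("retrying download")
}
```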
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + d := &downloadTransfer{ + Transfer: NewTransfer(), + layerStore: ldm.layerStore, + } + + go func() { + defer func() { + close(progressChan) + }() + + <-start + + close(inactive) + + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-parentDownload.Done(): + } + + l, err := parentDownload.result() + if err != nil { + d.err = err + return + } + parentLayer := l.ChainID() + + // sourceDownload should have already finished if + // parentDownload finished, but wait for it explicitly + // to be sure. + select { + case <-d.Transfer.Context().Done(): + d.err = errors.New("layer registration cancelled") + return + case <-sourceDownload.Done(): + } + + l, err = sourceDownload.result() + if err != nil { + d.err = err + return + } + + layerReader, err := l.TarStream() + if err != nil { + d.err = err + return + } + defer layerReader.Close() + + var src distribution.Descriptor + if fs, ok := l.(distribution.Describable); ok { + src = fs.Descriptor() + } + if ds, ok := d.layerStore.(layer.DescribableStore); ok { + d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) + } else { + d.layer, err = d.layerStore.Register(layerReader, parentLayer) + } + if err != nil { + d.err = fmt.Errorf("failed to register layer: %v", err) + return + } + + withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) + if hasRegistered { + withRegistered.Registered(d.layer.DiffID()) + } + + // Doesn't actually need to be its own goroutine, but + // done like this so we can defer close(c). + go func() { + <-d.Transfer.Released() + if d.layer != nil { + layer.ReleaseAndLog(d.layerStore, d.layer) + } + }() + }() + + return d + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go new file mode 100644 index 000000000000..b86c503a085a --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/transfer.go @@ -0,0 +1,401 @@ +package xfer + +import ( + "runtime" + "sync" + + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +// DoNotRetry is an error wrapper indicating that the error cannot be resolved +// with a retry. +type DoNotRetry struct { + Err error +} + +// Error returns the stringified representation of the encapsulated error. +func (e DoNotRetry) Error() string { + return e.Err.Error() +} + +// Watcher is returned by Watch and can be passed to Release to stop watching. +type Watcher struct { + // signalChan is used to signal to the watcher goroutine that + // new progress information is available, or that the transfer + // has finished. + signalChan chan struct{} + // releaseChan signals to the watcher goroutine that the watcher + // should be detached. + releaseChan chan struct{} + // running remains open as long as the watcher is watching the + // transfer. It gets closed if the transfer finishes or the + // watcher is detached. + running chan struct{} +} + +// Transfer represents an in-progress transfer. 
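Before the Transfer interface below, a note on the Watcher defined above: the broadcaster wakes it with a non-blocking send on signalChan, and on each wakeup the watcher re-reads the most recent progress item under the lock, so dropped signals coalesce updates rather than lose the final state. A pared-down, single-watcher model of that handshake (the real code supports many watchers plus detachment):

```go
package main

import (
	"fmt"
	"sync"
)

// state models the transfer's lastProgress plus one watcher's
// signalChan: publish never blocks on a slow watcher.
type state struct {
	mu   sync.Mutex
	last int
	sig  chan struct{} // unbuffered, like signalChan
	done chan struct{}
}

func (s *state) publish(v int) {
	s.mu.Lock()
	s.last = v
	s.mu.Unlock()
	select {
	case s.sig <- struct{}{}:
	default: // watcher is busy; it will pick up s.last later
	}
}

func main() {
	s := &state{sig: make(chan struct{}), done: make(chan struct{})}
	finished := make(chan struct{})

	go func() { // the watcher
		defer close(finished)
		for {
			select {
			case <-s.sig:
				// Wake up; a real watcher would re-read s.last
				// here and write it to its progress output.
			case <-s.done:
				s.mu.Lock()
				fmt.Println("final value:", s.last)
				s.mu.Unlock()
				return
			}
		}
	}()

	for i := 1; i <= 1000; i++ {
		s.publish(i) // most signals are dropped, the value is not
	}
	close(s.done)
	<-finished
}
```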
+type Transfer interface { + Watch(progressOutput progress.Output) *Watcher + Release(*Watcher) + Context() context.Context + Close() + Done() <-chan struct{} + Released() <-chan struct{} + Broadcast(masterProgressChan <-chan progress.Progress) +} + +type transfer struct { + mu sync.Mutex + + ctx context.Context + cancel context.CancelFunc + + // watchers keeps track of the goroutines monitoring progress output, + // indexed by the channels that release them. + watchers map[chan struct{}]*Watcher + + // lastProgress is the most recently received progress event. + lastProgress progress.Progress + // hasLastProgress is true when lastProgress has been set. + hasLastProgress bool + + // running remains open as long as the transfer is in progress. + running chan struct{} + // released stays open until all watchers release the transfer and + // the transfer is no longer tracked by the transfer manager. + released chan struct{} + + // broadcastDone is true if the master progress channel has closed. + broadcastDone bool + // closed is true if Close has been called + closed bool + // broadcastSyncChan allows watchers to "ping" the broadcasting + // goroutine to wait for it for deplete its input channel. This ensures + // a detaching watcher won't miss an event that was sent before it + // started detaching. + broadcastSyncChan chan struct{} +} + +// NewTransfer creates a new transfer. +func NewTransfer() Transfer { + t := &transfer{ + watchers: make(map[chan struct{}]*Watcher), + running: make(chan struct{}), + released: make(chan struct{}), + broadcastSyncChan: make(chan struct{}), + } + + // This uses context.Background instead of a caller-supplied context + // so that a transfer won't be cancelled automatically if the client + // which requested it is ^C'd (there could be other viewers). + t.ctx, t.cancel = context.WithCancel(context.Background()) + + return t +} + +// Broadcast copies the progress and error output to all viewers. +func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { + for { + var ( + p progress.Progress + ok bool + ) + select { + case p, ok = <-masterProgressChan: + default: + // We've depleted the channel, so now we can handle + // reads on broadcastSyncChan to let detaching watchers + // know we're caught up. + select { + case <-t.broadcastSyncChan: + continue + case p, ok = <-masterProgressChan: + } + } + + t.mu.Lock() + if ok { + t.lastProgress = p + t.hasLastProgress = true + for _, w := range t.watchers { + select { + case w.signalChan <- struct{}{}: + default: + } + } + } else { + t.broadcastDone = true + } + t.mu.Unlock() + if !ok { + close(t.running) + return + } + } +} + +// Watch adds a watcher to the transfer. The supplied channel gets progress +// updates and is closed when the transfer finishes. +func (t *transfer) Watch(progressOutput progress.Output) *Watcher { + t.mu.Lock() + defer t.mu.Unlock() + + w := &Watcher{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + + t.watchers[w.releaseChan] = w + + if t.broadcastDone { + close(w.running) + return w + } + + go func() { + defer func() { + close(w.running) + }() + var ( + done bool + lastWritten progress.Progress + hasLastWritten bool + ) + for { + t.mu.Lock() + hasLastProgress := t.hasLastProgress + lastProgress := t.lastProgress + t.mu.Unlock() + + // Make sure we don't write the last progress item + // twice. 
+ if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { + progressOutput.WriteProgress(lastProgress) + lastWritten = lastProgress + hasLastWritten = true + } + + if done { + return + } + + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + // Since the watcher is going to detach, make + // sure the broadcaster is caught up so we + // don't miss anything. + select { + case t.broadcastSyncChan <- struct{}{}: + case <-t.running: + } + case <-t.running: + done = true + } + } + }() + + return w +} + +// Release is the inverse of Watch; indicating that the watcher no longer wants +// to be notified about the progress of the transfer. All calls to Watch must +// be paired with later calls to Release so that the lifecycle of the transfer +// is properly managed. +func (t *transfer) Release(watcher *Watcher) { + t.mu.Lock() + delete(t.watchers, watcher.releaseChan) + + if len(t.watchers) == 0 { + if t.closed { + // released may have been closed already if all + // watchers were released, then another one was added + // while waiting for a previous watcher goroutine to + // finish. + select { + case <-t.released: + default: + close(t.released) + } + } else { + t.cancel() + } + } + t.mu.Unlock() + + close(watcher.releaseChan) + // Block until the watcher goroutine completes + <-watcher.running +} + +// Done returns a channel which is closed if the transfer completes or is +// cancelled. Note that having 0 watchers causes a transfer to be cancelled. +func (t *transfer) Done() <-chan struct{} { + // Note that this doesn't return t.ctx.Done() because that channel will + // be closed the moment Cancel is called, and we need to return a + // channel that blocks until a cancellation is actually acknowledged by + // the transfer function. + return t.running +} + +// Released returns a channel which is closed once all watchers release the +// transfer AND the transfer is no longer tracked by the transfer manager. +func (t *transfer) Released() <-chan struct{} { + return t.released +} + +// Context returns the context associated with the transfer. +func (t *transfer) Context() context.Context { + return t.ctx +} + +// Close is called by the transfer manager when the transfer is no longer +// being tracked. +func (t *transfer) Close() { + t.mu.Lock() + t.closed = true + if len(t.watchers) == 0 { + close(t.released) + } + t.mu.Unlock() +} + +// DoFunc is a function called by the transfer manager to actually perform +// a transfer. It should be non-blocking. It should wait until the start channel +// is closed before transferring any data. If the function closes inactive, that +// signals to the transfer manager that the job is no longer actively moving +// data - for example, it may be waiting for a dependent transfer to finish. +// This prevents it from taking up a slot. +type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer + +// TransferManager is used by LayerDownloadManager and LayerUploadManager to +// schedule and deduplicate transfers. It is up to the TransferManager +// implementation to make the scheduling and concurrency decisions. +type TransferManager interface { + // Transfer checks if a transfer with the given key is in progress. If + // so, it returns progress and error output from that transfer. + // Otherwise, it will call xferFunc to initiate the transfer. 
+	Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher)
+	// SetConcurrency sets the concurrency limit so that it can be
+	// adjusted on daemon reload.
+	SetConcurrency(concurrency int)
+}
+
+type transferManager struct {
+	mu sync.Mutex
+
+	concurrencyLimit int
+	activeTransfers  int
+	transfers        map[string]Transfer
+	waitingTransfers []chan struct{}
+}
+
+// NewTransferManager returns a new TransferManager.
+func NewTransferManager(concurrencyLimit int) TransferManager {
+	return &transferManager{
+		concurrencyLimit: concurrencyLimit,
+		transfers:        make(map[string]Transfer),
+	}
+}
+
+// SetConcurrency sets the concurrency limit.
+func (tm *transferManager) SetConcurrency(concurrency int) {
+	tm.mu.Lock()
+	tm.concurrencyLimit = concurrency
+	tm.mu.Unlock()
+}
+
+// Transfer checks if a transfer matching the given key is in progress. If not,
+// it starts one by calling xferFunc. The caller supplies a channel which
+// receives progress output from the transfer.
+func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) {
+	tm.mu.Lock()
+	defer tm.mu.Unlock()
+
+	for {
+		xfer, present := tm.transfers[key]
+		if !present {
+			break
+		}
+		// Transfer is already in progress.
+		watcher := xfer.Watch(progressOutput)
+
+		select {
+		case <-xfer.Context().Done():
+			// We don't want to watch a transfer that has been cancelled.
+			// Wait for it to be removed from the map and try again.
+			xfer.Release(watcher)
+			tm.mu.Unlock()
+			// The goroutine that removes this transfer from the
+			// map is also waiting for xfer.Done(), so yield to it.
+			// This could be avoided by adding a Closed method
+			// to Transfer to allow explicitly waiting for it to be
+			// removed from the map, but forcing a scheduling round in
+			// this very rare case seems better than bloating the
+			// interface definition.
+			runtime.Gosched()
+			<-xfer.Done()
+			tm.mu.Lock()
+		default:
+			return xfer, watcher
+		}
+	}
+
+	start := make(chan struct{})
+	inactive := make(chan struct{})
+
+	if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit {
+		close(start)
+		tm.activeTransfers++
+	} else {
+		tm.waitingTransfers = append(tm.waitingTransfers, start)
+	}
+
+	masterProgressChan := make(chan progress.Progress)
+	xfer := xferFunc(masterProgressChan, start, inactive)
+	watcher := xfer.Watch(progressOutput)
+	go xfer.Broadcast(masterProgressChan)
+	tm.transfers[key] = xfer
+
+	// When the transfer is finished, remove it from the map.
+	go func() {
+		for {
+			select {
+			case <-inactive:
+				tm.mu.Lock()
+				tm.inactivate(start)
+				tm.mu.Unlock()
+				inactive = nil
+			case <-xfer.Done():
+				tm.mu.Lock()
+				if inactive != nil {
+					tm.inactivate(start)
+				}
+				delete(tm.transfers, key)
+				tm.mu.Unlock()
+				xfer.Close()
+				return
+			}
+		}
+	}()
+
+	return xfer, watcher
+}
+
+func (tm *transferManager) inactivate(start chan struct{}) {
+	// If the transfer was started, remove it from the activeTransfers
+	// count.
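+	// A closed start channel means this transfer actually consumed a
+	// concurrency slot; the non-blocking receive below distinguishes a
+	// started transfer from one still queued in waitingTransfers.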
+ select { + case <-start: + // Start next transfer if any are waiting + if len(tm.waitingTransfers) != 0 { + close(tm.waitingTransfers[0]) + tm.waitingTransfers = tm.waitingTransfers[1:] + } else { + tm.activeTransfers-- + } + default: + } +} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go new file mode 100644 index 000000000000..58422e57a127 --- /dev/null +++ b/vendor/github.com/docker/docker/distribution/xfer/upload.go @@ -0,0 +1,174 @@ +package xfer + +import ( + "errors" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/progress" + "golang.org/x/net/context" +) + +const maxUploadAttempts = 5 + +// LayerUploadManager provides task management and progress reporting for +// uploads. +type LayerUploadManager struct { + tm TransferManager + waitDuration time.Duration +} + +// SetConcurrency sets the max concurrent uploads for each push +func (lum *LayerUploadManager) SetConcurrency(concurrency int) { + lum.tm.SetConcurrency(concurrency) +} + +// NewLayerUploadManager returns a new LayerUploadManager. +func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { + manager := LayerUploadManager{ + tm: NewTransferManager(concurrencyLimit), + waitDuration: time.Second, + } + for _, option := range options { + option(&manager) + } + return &manager +} + +type uploadTransfer struct { + Transfer + + remoteDescriptor distribution.Descriptor + err error +} + +// An UploadDescriptor references a layer that may need to be uploaded. +type UploadDescriptor interface { + // Key returns the key used to deduplicate uploads. + Key() string + // ID returns the ID for display purposes. + ID() string + // DiffID should return the DiffID for this layer. + DiffID() layer.DiffID + // Upload is called to perform the Upload. + Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) + // SetRemoteDescriptor provides the distribution.Descriptor that was + // returned by Upload. This descriptor is not to be confused with + // the UploadDescriptor interface, which is used for internally + // identifying layers that are being uploaded. + SetRemoteDescriptor(descriptor distribution.Descriptor) +} + +// Upload is a blocking function which ensures the listed layers are present on +// the remote registry. It uses the string returned by the Key method to +// deduplicate uploads. 
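+// A minimal call sketch (the descriptor slice and progress channel are
+// assumed to be supplied by the caller):
+//
+//	progressChan := make(chan progress.Progress)
+//	err := uploadManager.Upload(ctx, descriptors, progress.ChanOutput(progressChan))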
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { + var ( + uploads []*uploadTransfer + dedupDescriptors = make(map[string]*uploadTransfer) + ) + + for _, descriptor := range layers { + progress.Update(progressOutput, descriptor.ID(), "Preparing") + + key := descriptor.Key() + if _, present := dedupDescriptors[key]; present { + continue + } + + xferFunc := lum.makeUploadFunc(descriptor) + upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) + defer upload.Release(watcher) + uploads = append(uploads, upload.(*uploadTransfer)) + dedupDescriptors[key] = upload.(*uploadTransfer) + } + + for _, upload := range uploads { + select { + case <-ctx.Done(): + return ctx.Err() + case <-upload.Transfer.Done(): + if upload.err != nil { + return upload.err + } + } + } + for _, l := range layers { + l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) + } + + return nil +} + +func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { + return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { + u := &uploadTransfer{ + Transfer: NewTransfer(), + } + + go func() { + defer func() { + close(progressChan) + }() + + progressOutput := progress.ChanOutput(progressChan) + + select { + case <-start: + default: + progress.Update(progressOutput, descriptor.ID(), "Waiting") + <-start + } + + retries := 0 + for { + remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) + if err == nil { + u.remoteDescriptor = remoteDescriptor + break + } + + // If an error was returned because the context + // was cancelled, we shouldn't retry. + select { + case <-u.Transfer.Context().Done(): + u.err = err + return + default: + } + + retries++ + if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { + logrus.Errorf("Upload failed: %v", err) + u.err = err + return + } + + logrus.Errorf("Upload failed, retrying: %v", err) + delay := retries * 5 + ticker := time.NewTicker(lum.waitDuration) + + selectLoop: + for { + progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) + select { + case <-ticker.C: + delay-- + if delay == 0 { + ticker.Stop() + break selectLoop + } + case <-u.Transfer.Context().Done(): + ticker.Stop() + u.err = errors.New("upload cancelled during retry delay") + return + } + } + } + }() + + return u + } +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 000000000000..53632cbc3532 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,76 @@ +package dockerversion + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" + "golang.org/x/net/context" +) + +// UAStringKey is used as key type for user-agent string in net/context struct +const UAStringKey = "upstream-user-agent" + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
+// In accordance with RFC 7231 (5.5.3), it is of the form:
+//	[docker client's UA] UpstreamClient([upstream client's UA])
+func DockerUserAgent(ctx context.Context) string {
+	httpVersion := make([]useragent.VersionInfo, 0, 6)
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit})
+	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
+		httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()})
+	}
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS})
+	httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH})
+
+	dockerUA := useragent.AppendVersions("", httpVersion...)
+	upstreamUA := getUserAgentFromContext(ctx)
+	if len(upstreamUA) > 0 {
+		ret := insertUpstreamUserAgent(upstreamUA, dockerUA)
+		return ret
+	}
+	return dockerUA
+}
+
+// getUserAgentFromContext returns the previously saved user-agent stored in ctx, if one exists
+func getUserAgentFromContext(ctx context.Context) string {
+	var upstreamUA string
+	if ctx != nil {
+		var ki interface{} = ctx.Value(UAStringKey)
+		if ki != nil {
+			upstreamUA = ctx.Value(UAStringKey).(string)
+		}
+	}
+	return upstreamUA
+}
+
+// escapeStr returns s with every rune in charsToEscape escaped by a backslash
+func escapeStr(s string, charsToEscape string) string {
+	var ret string
+	for _, currRune := range s {
+		appended := false
+		for _, escapeableRune := range charsToEscape {
+			if currRune == escapeableRune {
+				ret += `\` + string(currRune)
+				appended = true
+				break
+			}
+		}
+		if !appended {
+			ret += string(currRune)
+		}
+	}
+	return ret
+}
+
+// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent
+// string of the form:
+//	$dockerUA UpstreamClient($upstreamUA)
+func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string {
+	charsToEscape := `();\`
+	upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape)
+	return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped)
+}
diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go
new file mode 100644
index 000000000000..33f77d3ce6ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go
@@ -0,0 +1,16 @@
+// +build !autogen
+
+// Package dockerversion is auto-generated at build-time
+package dockerversion
+
+// Default build-time variables for library-import.
+// This file is overridden on build with build-time information.
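+// For illustration only: the generated counterpart implied by the build tag
+// above would carry the real values under the opposite tag (the file layout
+// and values in this sketch are assumed, not part of this change):
+//
+//	// +build autogen
+//
+//	package dockerversion
+//
+//	const (
+//		GitCommit string = "0123abcd"
+//		Version   string = "17.06.0-dev"
+//	)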
+const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" +) diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 000000000000..10f6dab5fc1a --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,178 @@ +package image + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// DigestWalkFunc is function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem. +type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns new filesystem based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. 
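+// Note that reads re-hash the content (see get below), so callers can rely
+// on the returned bytes still matching dgst.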
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + if err := os.Remove(s.contentFile(dgst)); err != nil { + return err + } + return nil +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest. +func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 000000000000..17935ac2342a --- /dev/null +++ b/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,150 @@ +package image + +import ( + "encoding/json" + "errors" + "io" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. 
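+// A trimmed example of the JSON this struct models (field values invented):
+//
+//	{
+//		"id": "8dbd9e392a96...",
+//		"parent": "27cf78414709...",
+//		"created": "2017-01-10T21:03:58Z",
+//		"os": "linux",
+//		"architecture": "amd64"
+//	}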
+type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. 
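+// The round trip below (struct -> JSON -> map[string]*json.RawMessage ->
+// JSON) relies on encoding/json writing map keys in sorted order.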
+func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// History stores build commands that were used to create an image +type History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Exporter provides interface for loading and saving images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("invalid image JSON, no RootFS key") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 000000000000..7b24e3ed1e2f --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,44 @@ +package image + +import ( + "runtime" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/layer" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. We +// explicitly handle this as an error case to ensure that a daemon which still +// has an older image like this on disk can still start, even though the +// image itself is not usable. See https://github.com/docker/docker/pull/25806. +const typeLayersWithBase = "layers+base" + +// RootFS describes images root filesystem +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// NewRootFS returns empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: TypeLayers} +} + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { + logrus.Warnf("Layer type is unsupported on this platform. 
DiffIDs: '%v'", r.DiffIDs) + return "" + } + return layer.CreateChainID(r.DiffIDs) +} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go new file mode 100644 index 000000000000..26ae109a0639 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store.go @@ -0,0 +1,296 @@ +package image + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution/digestset" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.Mutex + ls LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set +} + +// NewImageStore returns new store object for given layer store +func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { + is := &store{ + ls: ls, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + l, err = is.ls.Get(chainID) + if err != nil { + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
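+	// (The guard below is by count: every non-empty history entry must be
+	// backed by a rootfs diffID, so more non-empty entries than diffIDs
+	// means the config is invalid.)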
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + l, err = is.ls.Get(layerID) + if err != nil { + return "", err + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +func (is *store) Search(term string) (ID, error) { + is.Lock() + defer is.Unlock() + + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = fmt.Errorf("No such image: %s", term) + } + return "", err + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.ls.Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? 
+} + +func (is *store) Children(id ID) []ID { + is.Lock() + defer is.Unlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.Lock() + defer is.Unlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go new file mode 100644 index 000000000000..0e8a23cb5de8 --- /dev/null +++ b/vendor/github.com/docker/docker/image/v1/imagev1.go @@ -0,0 +1,150 @@ +package v1 + +import ( + "encoding/json" + "reflect" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" +) + +// noFallbackMinVersion is the minimum version for which v1compatibility +// information will not be marshaled through the Image struct to remove +// blank fields. +var noFallbackMinVersion = "1.8.3" + +// HistoryFromConfig creates a History struct from v1 configuration JSON +func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { + h := image.History{} + var v1Image image.V1Image + if err := json.Unmarshal(imageJSON, &v1Image); err != nil { + return h, err + } + + return image.History{ + Author: v1Image.Author, + Created: v1Image.Created, + CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), + Comment: v1Image.Comment, + EmptyLayer: emptyLayer, + }, nil +} + +// CreateID creates an ID from v1 image, layerID and parent ID. +// Used for backwards compatibility with old clients. +func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { + v1Image.ID = "" + v1JSON, err := json.Marshal(v1Image) + if err != nil { + return "", err + } + + var config map[string]*json.RawMessage + if err := json.Unmarshal(v1JSON, &config); err != nil { + return "", err + } + + // FIXME: note that this is slightly incompatible with RootFS logic + config["layer_id"] = rawJSON(layerID) + if parent != "" { + config["parent"] = rawJSON(parent) + } + + configJSON, err := json.Marshal(config) + if err != nil { + return "", err + } + logrus.Debugf("CreateV1ID %s", configJSON) + + return digest.FromBytes(configJSON), nil +} + +// MakeConfigFromV1Config creates an image config from the legacy V1 config format. 
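+// It drops v1-only keys (id, parent, Size, parent_id, layer_id, throwaway)
+// and injects the modern "rootfs" and "history" sections supplied by the
+// caller.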
+func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { + var dver struct { + DockerVersion string `json:"docker_version"` + } + + if err := json.Unmarshal(imageJSON, &dver); err != nil { + return nil, err + } + + useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) + + if useFallback { + var v1Image image.V1Image + err := json.Unmarshal(imageJSON, &v1Image) + if err != nil { + return nil, err + } + imageJSON, err = json.Marshal(v1Image) + if err != nil { + return nil, err + } + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(imageJSON, &c); err != nil { + return nil, err + } + + delete(c, "id") + delete(c, "parent") + delete(c, "Size") // Size is calculated from data on disk and is inconsistent + delete(c, "parent_id") + delete(c, "layer_id") + delete(c, "throwaway") + + c["rootfs"] = rawJSON(rootfs) + c["history"] = rawJSON(history) + + return json.Marshal(c) +} + +// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct +func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { + // Top-level v1compatibility string should be a modified version of the + // image config. + var configAsMap map[string]*json.RawMessage + if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { + return nil, err + } + + // Delete fields that didn't exist in old manifest + imageType := reflect.TypeOf(img).Elem() + for i := 0; i < imageType.NumField(); i++ { + f := imageType.Field(i) + jsonName := strings.Split(f.Tag.Get("json"), ",")[0] + // Parent is handled specially below. + if jsonName != "" && jsonName != "parent" { + delete(configAsMap, jsonName) + } + } + configAsMap["id"] = rawJSON(v1ID) + if parentV1ID != "" { + configAsMap["parent"] = rawJSON(parentV1ID) + } + if throwaway { + configAsMap["throwaway"] = rawJSON(true) + } + + return json.Marshal(configAsMap) +} + +func rawJSON(value interface{}) *json.RawMessage { + jsonval, err := json.Marshal(value) + if err != nil { + return nil + } + return (*json.RawMessage)(&jsonval) +} + +// ValidateID checks whether an ID string is a valid image ID. +func ValidateID(id string) error { + return stringid.ValidateID(id) +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 000000000000..3b6ffc82f71c --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,56 @@ +package layer + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
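+// The digest above can be re-derived, since an empty tar stream is just the
+// two zero-filled 512-byte trailer blocks (sketch):
+//
+//	sum := sha256.Sum256(make([]byte, 1024))
+//	fmt.Printf("sha256:%x\n", sum) // 5f70bf18a086...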
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 000000000000..7ea418cd5ae4 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,354 @@ +package layer + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// NewFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
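+// The resulting on-disk layout, per the path helpers below, is:
+//
+//	<root>/sha256/<chainID hex>/{size,parent,diff,cache-id,descriptor.json,tar-split.json.gz}
+//	<root>/mounts/<name>/{mount-id,init-id,parent}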
+func NewFSMetadataStore(root string) (MetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) GetDiffID(layer 
ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms 
*fileMetadataStore) List() ([]ChainID, []string, error) {
+	var ids []ChainID
+	for _, algorithm := range supportedAlgorithms {
+		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, nil, err
+		}
+
+		for _, fi := range fileInfos {
+			if fi.IsDir() && fi.Name() != "mounts" {
+				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+				if err := dgst.Validate(); err != nil {
+					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
+				} else {
+					ids = append(ids, ChainID(dgst))
+				}
+			}
+		}
+	}
+
+	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return ids, []string{}, nil
+		}
+		return nil, nil, err
+	}
+
+	var mounts []string
+	for _, fi := range fileInfos {
+		if fi.IsDir() {
+			mounts = append(mounts, fi.Name())
+		}
+	}
+
+	return ids, mounts, nil
+}
+
+func (fms *fileMetadataStore) Remove(layer ChainID) error {
+	return os.RemoveAll(fms.getLayerDirectory(layer))
+}
+
+func (fms *fileMetadataStore) RemoveMount(mount string) error {
+	return os.RemoveAll(fms.getMountDirectory(mount))
+}
diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go
new file mode 100644
index 000000000000..7b993ee4ad60
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer.go
@@ -0,0 +1,282 @@
+// Package layer is a package for managing read-only
+// and read-write mounts on the union file system
+// driver. Read-only mounts are referenced using a
+// content hash and are protected from mutation in
+// the exposed interface. The tar format is used
+// to create read-only layers and export both
+// read-only and writable layers. The exported
+// tar data for a read-only layer should match
+// the tar used to create the layer.
+package layer
+
+import (
+	"errors"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrLayerDoesNotExist is used when an operation is
+	// attempted on a layer which does not exist.
+	ErrLayerDoesNotExist = errors.New("layer does not exist")
+
+	// ErrLayerNotRetained is used when a release is
+	// attempted on a layer which is not retained.
+	ErrLayerNotRetained = errors.New("layer not retained")
+
+	// ErrMountDoesNotExist is used when an operation is
+	// attempted on a mount layer which does not exist.
+	ErrMountDoesNotExist = errors.New("mount does not exist")
+
+	// ErrMountNameConflict is used when a mount is attempted
+	// to be created but there is already a mount with the name
+	// used for creation.
+	ErrMountNameConflict = errors.New("mount already exists with name")
+
+	// ErrActiveMount is used when an operation on a
+	// mount is attempted but the layer is still
+	// mounted and the operation cannot be performed.
+	ErrActiveMount = errors.New("mount still active")
+
+	// ErrNotMounted is used when requesting an active
+	// mount but the layer is not mounted.
+	ErrNotMounted = errors.New("not mounted")
+
+	// ErrMaxDepthExceeded is used when a layer is attempted
+	// to be created which would result in a layer depth
+	// greater than the 125 max.
+	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current platform.
+	ErrNotSupported = errors.New("not supported on this platform")
+)
+
+// ChainID is the content-addressable ID of a layer.
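+// It is derived from the chain of DiffIDs (see CreateChainID below):
+// ChainID(layer0) = DiffID(layer0), and
+// ChainID(layerN) = SHA256(ChainID(layerN-1) + " " + DiffID(layerN)).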
+type ChainID digest.Digest
+
+// String returns a string rendition of a layer ID
+func (id ChainID) String() string {
+	return string(id)
+}
+
+// DiffID is the hash of an individual layer tar.
+type DiffID digest.Digest
+
+// String returns a string rendition of a layer DiffID
+func (diffID DiffID) String() string {
+	return string(diffID)
+}
+
+// TarStreamer represents an object which may
+// have its contents exported as a tar stream.
+type TarStreamer interface {
+	// TarStream returns a tar archive stream
+	// for the contents of a layer.
+	TarStream() (io.ReadCloser, error)
+}
+
+// Layer represents a read-only layer
+type Layer interface {
+	TarStreamer
+
+	// TarStreamFrom returns a tar archive stream for all of the layer
+	// chain with arbitrary depth.
+	TarStreamFrom(ChainID) (io.ReadCloser, error)
+
+	// ChainID returns the content hash of the entire layer chain. The hash
+	// chain is made up of the DiffID of the top layer and all of its parents.
+	ChainID() ChainID
+
+	// DiffID returns the content hash of the layer
+	// tar stream used to create this layer.
+	DiffID() DiffID
+
+	// Parent returns the next layer in the layer chain.
+	Parent() Layer
+
+	// Size returns the size of the entire layer chain. The size
+	// is calculated from the total size of all files in the layers.
+	Size() (int64, error)
+
+	// DiffSize returns the size difference of the top layer
+	// from its parent layer.
+	DiffSize() (int64, error)
+
+	// Metadata returns the low level storage metadata associated
+	// with the layer.
+	Metadata() (map[string]string, error)
+}
+
+// RWLayer represents a layer which is
+// readable and writable
+type RWLayer interface {
+	TarStreamer
+
+	// Name of mounted layer
+	Name() string
+
+	// Parent returns the layer which the writable
+	// layer was created from.
+	Parent() Layer
+
+	// Mount mounts the RWLayer and returns the filesystem path
+	// to the writable layer.
+	Mount(mountLabel string) (string, error)
+
+	// Unmount unmounts the RWLayer. This should be called
+	// for every mount. If there are multiple mount calls
+	// this operation will only decrement the internal mount counter.
+	Unmount() error
+
+	// Size represents the size of the writable layer
+	// as calculated by the total size of the files
+	// changed in the mutable layer.
+	Size() (int64, error)
+
+	// Changes returns the set of changes for the mutable layer
+	// from the base layer.
+	Changes() ([]archive.Change, error)
+
+	// Metadata returns the low level metadata for the mutable layer
+	Metadata() (map[string]string, error)
+}
+
+// Metadata holds information about a
+// read-only layer
+type Metadata struct {
+	// ChainID is the content hash of the layer
+	ChainID ChainID
+
+	// DiffID is the hash of the tar data used to
+	// create the layer
+	DiffID DiffID
+
+	// Size is the size of the layer and all parents
+	Size int64
+
+	// DiffSize is the size of the top layer
+	DiffSize int64
+}
+
+// MountInit is a function to initialize a
+// writable mount. Changes made here will
+// not be included in the Tar stream of the
+// RWLayer.
+type MountInit func(root string) error
+
+// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
+type CreateRWLayerOpts struct {
+	MountLabel string
+	InitFunc   MountInit
+	StorageOpt map[string]string
+}
+
+// Store represents a backend for managing both
+// read-only and read-write layers.
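+// A registration round trip sketch (ts is a layer tar stream; error
+// handling elided):
+//
+//	l, _ := ls.Register(ts, parentChainID)
+//	defer ReleaseAndLog(ls, l)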
+type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// MetadataTransaction represents functions for setting layer metadata +// with a single transaction. +type MetadataTransaction interface { + SetSize(int64) error + SetParent(parent ChainID) error + SetDiffID(DiffID) error + SetCacheID(string) error + SetDescriptor(distribution.Descriptor) error + TarSplitWriter(compressInput bool) (io.WriteCloser, error) + + Commit(ChainID) error + Cancel() error + String() string +} + +// MetadataStore represents a backend for persisting +// metadata about layers and providing the metadata +// for restoring a Store. +type MetadataStore interface { + // StartTransaction starts an update for new metadata + // which will be used to represent an ID on commit. + StartTransaction() (MetadataTransaction, error) + + GetSize(ChainID) (int64, error) + GetParent(ChainID) (ChainID, error) + GetDiffID(ChainID) (DiffID, error) + GetCacheID(ChainID) (string, error) + GetDescriptor(ChainID) (distribution.Descriptor, error) + TarSplitReader(ChainID) (io.ReadCloser, error) + + SetMountID(string, string) error + SetInitID(string, string) error + SetMountParent(string, ChainID) error + + GetMountID(string) (string, error) + GetInitID(string) (string, error) + GetMountParent(string) (ChainID, error) + + // List returns the full list of referenced + // read-only and read-write layers + List() ([]ChainID, []string, error) + + Remove(ChainID) error + RemoveMount(string) error +} + +// CreateChainID returns ID for a layerDigest slice +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // H = "H(n-1) SHA256(n)" + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
+}
+
+// ReleaseAndLog releases the provided layer from the given layer
+// store, logging any error and the release metadata.
+func ReleaseAndLog(ls Store, l Layer) {
+	metadata, err := ls.Release(l)
+	if err != nil {
+		logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
+	}
+	LogReleaseMetadata(metadata)
+}
+
+// LogReleaseMetadata logs a metadata array; use it to
+// ensure consistent logging for release metadata.
+func LogReleaseMetadata(metadatas []Metadata) {
+	for _, metadata := range metadatas {
+		logrus.Infof("Layer %s cleaned up", metadata.ChainID)
+	}
+}
diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go
new file mode 100644
index 000000000000..5caa5d41f289
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer_store.go
@@ -0,0 +1,735 @@
+package layer
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// maxLayerDepth represents the maximum number of
+// layers which can be chained together. 125 was
+// chosen to account for the 127 max in some
+// graphdrivers plus the 2 additional layers
+// used to create a rwlayer.
+const maxLayerDepth = 125
+
+type layerStore struct {
+	store  MetadataStore
+	driver graphdriver.Driver
+
+	layerMap map[ChainID]*roLayer
+	layerL   sync.Mutex
+
+	mounts map[string]*mountedLayer
+	mountL sync.Mutex
+
+	useTarSplit bool
+}
+
+// StoreOptions are the options used to create a new Store instance
+type StoreOptions struct {
+	StorePath                 string
+	MetadataStorePathTemplate string
+	GraphDriver               string
+	GraphDriverOptions        []string
+	UIDMaps                   []idtools.IDMap
+	GIDMaps                   []idtools.IDMap
+	PluginGetter              plugingetter.PluginGetter
+	ExperimentalEnabled       bool
+}
+
+// NewStoreFromOptions creates a new Store instance
+func NewStoreFromOptions(options StoreOptions) (Store, error) {
+	driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{
+		Root:                options.StorePath,
+		DriverOptions:       options.GraphDriverOptions,
+		UIDMaps:             options.UIDMaps,
+		GIDMaps:             options.GIDMaps,
+		ExperimentalEnabled: options.ExperimentalEnabled,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
+	}
+	logrus.Debugf("Using graph driver %s", driver)
+
+	fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
+	if err != nil {
+		return nil, err
+	}
+
+	return NewStoreFromGraphDriver(fms, driver)
+}
+
+// NewStoreFromGraphDriver creates a new Store instance using the provided
+// metadata store and graph driver. The metadata store will be used to restore
+// the Store.
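+// Restoration is best-effort: layers or mounts that fail to load are
+// logged at debug level and skipped rather than failing construction.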
+func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ls := &layerStore{ + store: store, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + } + + ids, mounts, err := store.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() + tr := io.TeeReader(ts, digester.Hash()) + + rdr := tr + if ls.useTarSplit { + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as 
reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + 
metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := 
ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. + initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) 
DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 000000000000..1276a912cc8c --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 000000000000..776b78ac02b2 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd solaris + +package layer + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 000000000000..a1c195311e0f --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,34 @@ +package layer + +import "errors" + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path, nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 000000000000..4803a1ae5632 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,256 @@ +package layer + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/Sirupsen/logrus" + "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
+func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
+    ls.mountL.Lock()
+    defer ls.mountL.Unlock()
+    m, ok := ls.mounts[name]
+    if ok {
+        if m.parent.chainID != parent {
+            return errors.New("name conflict, mismatched parent")
+        }
+        if m.mountID != graphID {
+            return errors.New("mount already exists")
+        }
+
+        return nil
+    }
+
+    if !ls.driver.Exists(graphID) {
+        return fmt.Errorf("graph ID does not exist: %q", graphID)
+    }
+
+    var p *roLayer
+    if string(parent) != "" {
+        p = ls.get(parent)
+        if p == nil {
+            return ErrLayerDoesNotExist
+        }
+
+        // Release parent chain if error
+        defer func() {
+            if err != nil {
+                ls.layerL.Lock()
+                ls.releaseLayer(p)
+                ls.layerL.Unlock()
+            }
+        }()
+    }
+
+    // TODO: Ensure graphID has correct parent
+
+    m = &mountedLayer{
+        name:       name,
+        parent:     p,
+        mountID:    graphID,
+        layerStore: ls,
+        references: map[RWLayer]*referencedRWLayer{},
+    }
+
+    // Check for existing init layer
+    initID := fmt.Sprintf("%s-init", graphID)
+    if ls.driver.Exists(initID) {
+        m.initID = initID
+    }
+
+    if err = ls.saveMount(m); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
+    defer func() {
+        if err != nil {
+            logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
+            diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
+        }
+    }()
+
+    if oldTarDataPath == "" {
+        err = errors.New("no tar-split file")
+        return
+    }
+
+    tarDataFile, err := os.Open(oldTarDataPath)
+    if err != nil {
+        return
+    }
+    defer tarDataFile.Close()
+    uncompressed, err := gzip.NewReader(tarDataFile)
+    if err != nil {
+        return
+    }
+
+    dgst := digest.Canonical.Digester()
+    err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
+    if err != nil {
+        return
+    }
+
+    diffID = DiffID(dgst.Digest())
+    err = os.RemoveAll(newTarDataPath)
+    if err != nil {
+        return
+    }
+    err = os.Link(oldTarDataPath, newTarDataPath)
+
+    return
+}
+
+func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
+    rawarchive, err := ls.driver.Diff(id, parent)
+    if err != nil {
+        return
+    }
+    defer rawarchive.Close()
+
+    f, err := os.Create(newTarDataPath)
+    if err != nil {
+        return
+    }
+    defer f.Close()
+    mfz := gzip.NewWriter(f)
+    defer mfz.Close()
+    metaPacker := storage.NewJSONPacker(mfz)
+
+    packerCounter := &packSizeCounter{metaPacker, &size}
+
+    archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
+    if err != nil {
+        return
+    }
+    dgst, err := digest.FromReader(archive)
+    if err != nil {
+        return
+    }
+    diffID = DiffID(dgst)
+    return
+}
+
+func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
+    // err is used to hold the error which will always trigger
+    // cleanup of created sources but may not be an error returned
+    // to the caller (already exists).
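The comment above describes a pattern this file leans on: err is kept function-scoped so that deferred cleanup fires on any failure, and it is sometimes set only to trigger that cleanup while the caller still receives a successful result (the "already exists" case). Reduced to a sketch with hypothetical rollback:

    var err error // deliberately function-scoped, not a named return value
    defer func() {
        if err != nil {
            // undo partially created state; err can be set solely to run
            // this cleanup even when the function returns a nil error
        }
    }()
    // ... work that assigns to err on failure ...
    // duplicate found: set err to fire the cleanup, yet report success
    err = errors.New("layer already exists")
    return existingLayer.getReference(), nil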
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 000000000000..a5cfcfa9bd75 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,99 @@ +package layer + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
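The distinction drawn in the comment above matters because a typed nil stored in an interface compares non-nil. A fragment illustrating the trap being avoided (assumes fmt is imported; not part of the vendored file):

    var p *roLayer        // nil pointer
    var l Layer = p       // interface value wrapping a nil pointer
    fmt.Println(l == nil) // false; callers checking l == nil would be misled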
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 000000000000..8b4cf8f0de9e --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,179 @@ +package layer + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + rc, err := rl.layerStore.getTarStream(rl) + if err != nil { + return nil, err + } + + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return vrc, nil +} + +// TarStreamFrom does not make any guarantees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx MetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + + return nil +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go new file mode 100644 index 000000000000..32bd7182a3a3 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go new file mode 100644 index 000000000000..c1ff931c2267 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_linux.go @@ -0,0 +1,176 @@ +package oci + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + s.Process.Capabilities = &specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), + } + + s.Linux = &specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind-mount or symlink of the container's ptmx. 
+ // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go new file mode 100644 index 000000000000..85c8b68e16e9 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_solaris.go @@ -0,0 +1,20 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default oci spec used by docker. +func DefaultSpec() specs.Spec { + s := specs.Spec{ + Version: "0.6.0", + Platform: specs.Platform{ + OS: "SunOS", + Arch: runtime.GOARCH, + }, + } + s.Solaris = &specs.Solaris{} + return s +} diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go new file mode 100644 index 000000000000..ab51904ec4ed --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults_windows.go @@ -0,0 +1,19 @@ +package oci + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// DefaultSpec returns default spec used by docker. +func DefaultSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Platform: specs.Platform{ + OS: runtime.GOOS, + Arch: runtime.GOARCH, + }, + Windows: &specs.Windows{}, + } +} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 000000000000..fa9c72698420 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 000000000000..b5d3fab595b4 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 000000000000..cb222dcee2d9 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
+ return + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 000000000000..a7814f5b906d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,97 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +var chrootArchiver = &archive.Archiver{Untar: Untar} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) + if err != nil { + return err + } + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func TarUntar(src, dst string) error { + return chrootArchiver.TarUntar(src, dst) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func CopyWithTar(src, dst string) error { + return chrootArchiver.CopyWithTar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +// +// If `dst` ends with a trailing slash '/' ('\' on Windows), the final +// destination path will be `dst/base(src)` or `dst\base(src)` +func CopyFileWithTar(src, dst string) (err error) { + return chrootArchiver.CopyFileWithTar(src, dst) +} + +// UntarPath is a convenience function which looks for an archive +// at filesystem path `src`, and unpacks it at `dst`. 
+func UntarPath(src, dst string) error { + return chrootArchiver.UntarPath(src, dst) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 000000000000..f2325abd74e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,86 @@ +// +build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 000000000000..0a500ed5c2dd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. 
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 000000000000..f9d7fed633de --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,108 @@ +package chrootarchive + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // make everything in new ns private + if err := mount.MakeRPrivate("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := syscall.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { + return fmt.Errorf("Error 
while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := syscall.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 000000000000..16354bf64877 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive + +import "syscall" + +func chroot(path string) error { + if err := syscall.Chroot(path); err != nil { + return err + } + return syscall.Chdir("/") +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 000000000000..49acad79ff28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 000000000000..33098b33e829 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
+func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 000000000000..dc07eb680dd8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+    dest = filepath.Clean(dest)
+
+    // Ensure it is a Windows-style volume path
+    dest = longpath.AddPrefix(dest)
+
+    if decompress {
+        decompressed, err := archive.DecompressStream(layer)
+        if err != nil {
+            return 0, err
+        }
+        defer decompressed.Close()
+
+        layer = decompressed
+    }
+
+    tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
+    if err != nil {
+        return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
+    }
+
+    s, err := archive.UnpackLayer(dest, layer, nil)
+    os.RemoveAll(tmpDir)
+    if err != nil {
+        return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
+    }
+
+    return s, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
new file mode 100644
index 000000000000..4f637f17b8f5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
@@ -0,0 +1,28 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+
+    "github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+    reexec.Register("docker-applyLayer", applyLayer)
+    reexec.Register("docker-untar", untar)
+}
+
+func fatal(err error) {
+    fmt.Fprint(os.Stderr, err)
+    os.Exit(1)
+}
+
+// flush consumes all the bytes from the reader discarding
+// any errors
+func flush(r io.Reader) (bytes int64, err error) {
+    return io.Copy(ioutil.Discard, r)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
new file mode 100644
index 000000000000..fa17c9bf8316
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
new file mode 100644
index 000000000000..7738fc7411e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+    "errors"
+    "fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+    Kernel int    // Version of the kernel (e.g. 4.1.2-generic -> 4)
+    Major  int    // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+    Minor  int    // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+    Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+    return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+    if a.Kernel < b.Kernel {
+        return -1
+    } else if a.Kernel > b.Kernel {
+        return 1
+    }
+
+    if a.Major < b.Major {
+        return -1
+    } else if a.Major > b.Major {
+        return 1
+    }
+
+    if a.Minor < b.Minor {
+        return -1
+    } else if a.Minor > b.Minor {
+        return 1
+    }
+
+    return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) { + var ( + kernel, major, minor, parsed int + flavor, partial string + ) + + // Ignore error from Sscanf to allow an empty flavor. Instead, just + // make sure we got all the version numbers. + parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) + if parsed < 2 { + return nil, errors.New("Can't parse kernel version " + release) + } + + // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 + parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) + if parsed < 1 { + flavor = partial + } + + return &VersionInfo{ + Kernel: kernel, + Major: major, + Minor: minor, + Flavor: flavor, + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go new file mode 100644 index 000000000000..71f205b2852e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -0,0 +1,56 @@ +// +build darwin + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/mattn/go-shellwords" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + release, err := getRelease() + if err != nil { + return nil, err + } + + return ParseRelease(release) +} + +// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version +func getRelease() (string, error) { + cmd := exec.Command("system_profiler", "SPSoftwareDataType") + osName, err := cmd.Output() + if err != nil { + return "", err + } + + var release string + data := strings.Split(string(osName), "\n") + for _, line := range data { + if strings.Contains(line, "Kernel Version") { + // It has the format like ' Kernel Version: Darwin 14.5.0' + content := strings.SplitN(line, ":", 2) + if len(content) != 2 { + return "", fmt.Errorf("Kernel Version is invalid") + } + + prettyNames, err := shellwords.Parse(content[1]) + if err != nil { + return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) + } + + if len(prettyNames) != 2 { + return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") + } + release = prettyNames[1] + } + } + + return release, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go new file mode 100644 index 000000000000..bd137dfb6fd5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go @@ -0,0 +1,45 @@ +// +build linux freebsd solaris openbsd + +// Package kernel provides helper functions to get, parse and compare kernel +// versions for different platforms. +package kernel + +import ( + "bytes" + + "github.com/Sirupsen/logrus" +) + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + uts, err := uname() + if err != nil { + return nil, err + } + + release := make([]byte, len(uts.Release)) + + i := 0 + for _, c := range uts.Release { + release[i] = byte(c) + i++ + } + + // Remove the \x00 from the release for Atoi to parse correctly + release = release[:bytes.IndexByte(release, 0)] + + return ParseRelease(string(release)) +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version.
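Before CheckKernelVersion below, a runnable sketch of the two release shapes that ParseRelease handles, plus the comparison helpers; this assumes the vendored import path and a unix-like build:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	// "3.12.25-gentoo" parses to 3.12.25 with flavor "-gentoo", while
	// "3.12-1-amd64" has no third number, so minor stays 0 and the whole
	// remainder "-1-amd64" becomes the flavor.
	for _, r := range []string{"3.12.25-gentoo", "3.12-1-amd64"} {
		v, err := kernel.ParseRelease(r)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s => kernel=%d major=%d minor=%d flavor=%q\n",
			r, v.Kernel, v.Major, v.Minor, v.Flavor)
	}

	// CheckKernelVersion (below) builds on CompareKernelVersion; note its
	// permissive failure mode: if the running kernel version cannot be
	// read, it logs a warning and reports true rather than blocking.
	if kernel.CheckKernelVersion(3, 10, 0) {
		fmt.Println("kernel is at least 3.10.0 (or could not be determined)")
	}
}
```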
+func CheckKernelVersion(k, major, minor int) bool { + if v, err := GetKernelVersion(); err != nil { + logrus.Warnf("error getting kernel version: %s", err) + } else { + if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go new file mode 100644 index 000000000000..80fab8ff6424 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -0,0 +1,69 @@ +// +build windows + +package kernel + +import ( + "fmt" + "syscall" + "unsafe" +) + +// VersionInfo holds information about the kernel. +type VersionInfo struct { + kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) + major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) + minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) + build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) +} + +func (k *VersionInfo) String() string { + return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) +} + +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() (*VersionInfo, error) { + + var ( + h syscall.Handle + dwVersion uint32 + err error + ) + + KVI := &VersionInfo{"Unknown", 0, 0, 0} + + if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, + syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), + 0, + syscall.KEY_READ, + &h); err != nil { + return KVI, err + } + defer syscall.RegCloseKey(h) + + var buf [1 << 10]uint16 + var typ uint32 + n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 + + if err = syscall.RegQueryValueEx(h, + syscall.StringToUTF16Ptr("BuildLabEx"), + nil, + &typ, + (*byte)(unsafe.Pointer(&buf[0])), + &n); err != nil { + return KVI, err + } + + KVI.kvi = syscall.UTF16ToString(buf[:]) + + // Important - docker.exe MUST be manifested for this API to return + // the correct information. + if dwVersion, err = syscall.GetVersion(); err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go new file mode 100644 index 000000000000..bb9b32641e80 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -0,0 +1,19 @@ +package kernel + +import ( + "syscall" +) + +// Utsname represents the system name structure. +// It is passthrough for syscall.Utsname in order to make it portable with +// other platforms where it is not available. 
+type Utsname syscall.Utsname + +func uname() (*syscall.Utsname, error) { + uts := &syscall.Utsname{} + + if err := syscall.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 000000000000..49370bd3dd93 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go new file mode 100644 index 000000000000..1da3f239fac8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux,!solaris + +package kernel + +import ( + "errors" +) + +// Utsname represents the system name structure. +// It is defined here to make it portable as it is available on linux but not +// on windows. +type Utsname struct { + Release [65]byte +} + +func uname() (*Utsname, error) { + return nil, errors.New("Kernel version detection is available only on linux") +} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 000000000000..b04b7bc82878 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,35 @@ +package plugingetter + +import "github.com/docker/docker/pkg/plugins" + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + BasePath() string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. 
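Before the CountedPlugin interface that follows, a hedged sketch of how a caller is expected to pair the plugingetter modes above; the getter value and plugin/capability names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugingetter"
)

// use sketches the reference-counting discipline for PluginGetter.Get.
func use(pg plugingetter.PluginGetter) error {
	// Acquire-mode Get increments the plugin's refcount.
	p, err := pg.Get("sample", "VolumeDriver", plugingetter.Acquire)
	if err != nil {
		return err
	}
	fmt.Println("using plugin", p.Name())
	// ... talk to the plugin via p.Client() ...

	// Hand the reference back; Lookup-mode Gets skip both steps.
	_, err = pg.Get("sample", "VolumeDriver", plugingetter.Release)
	return err
}

func main() {
	// The mode constants double as refcount deltas.
	fmt.Println(plugingetter.Lookup, plugingetter.Acquire, plugingetter.Release) // 0 1 -1
}
```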
+type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 000000000000..e8e730eb58e5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,205 @@ +package plugins + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeoutInSecs), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. +func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: time.Duration(timeoutInSecs) * time.Second, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. 
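Before Call below, a minimal client-side sketch; the socket address is illustrative, and note that Call retries for up to 30 seconds if the plugin is unreachable:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// Local plugin sockets have an empty URL host, so newTransport above
	// falls back to the path component of the address.
	client, err := plugins.NewClient("unix:///run/docker/plugins/sample.sock", nil)
	if err != nil {
		panic(err)
	}

	// Call posts JSON-encoded args and decodes the JSON response;
	// "Plugin.Activate" is the real handshake method used by activate().
	var manifest plugins.Manifest
	if err := client.Call("Plugin.Activate", nil, &manifest); err != nil {
		fmt.Println("activate failed:", err)
		return
	}
	fmt.Println("implements:", manifest.Implements)
}
```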
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + var retries int + start := time.Now() + + for { + resp, err := c.http.Do(req) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... 
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return resp.Body, nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 000000000000..e99581c57331 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,131 @@ +package plugins + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). +func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
+ } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, ErrNotFound +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 000000000000..693a47e39485 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 000000000000..d7c1fe4942bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 000000000000..7988471026d4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return false + } + e, ok := err.(*statusError) + if !ok { + return false + } + return e.status == status +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go new file mode 100644 index 000000000000..c0059cba7586 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -0,0 +1,329 @@ +// Package plugins provides structures and helper functions to manage Docker +// plugins. +// +// Docker discovers plugins by looking for them in the plugin directory whenever +// a user or container tries to use one by name. 
UNIX domain socket files must +// be located under /run/docker/plugins, whereas spec files can be located +// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled +// by the Registry interface, which lets you list all plugins or get a plugin by +// its name if it exists. +// +// The plugins need to implement an HTTP server and bind this to the UNIX socket +// or the address specified in the spec files. +// A handshake is sent at /Plugin.Activate, and plugins are expected to return +// a Manifest with a list of Docker subsystems which this plugin implements. +// +// In order to use a plugin, you can use ``Get`` with the name of the +// plugin and the subsystem it implements. +// +// plugin, err := plugins.Get("example", "VolumeDriver") +// if err != nil { +// return fmt.Errorf("Error looking up volume plugin example: %v", err) +// } +package plugins + +import ( + "errors" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/docker/go-connections/tlsconfig" +) + +var ( + // ErrNotImplements is returned if the plugin does not implement the requested driver. + ErrNotImplements = errors.New("Plugin does not implement the requested driver") +) + +type plugins struct { + sync.Mutex + plugins map[string]*Plugin +} + +type extpointHandlers struct { + sync.RWMutex + extpointHandlers map[string][]func(string, *Client) +} + +var ( + storage = plugins{plugins: make(map[string]*Plugin)} + handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} +) + +// Manifest lists what a plugin implements. +type Manifest struct { + // List of subsystems the plugin implements. + Implements []string +} + +// Plugin is the definition of a docker plugin. +type Plugin struct { + // Name of the plugin + name string + // Address of the plugin + Addr string + // TLS configuration of the plugin + TLSConfig *tlsconfig.Options + // Client attached to the plugin + client *Client + // Manifest of the plugin (see above) + Manifest *Manifest `json:"-"` + + // wait for activation to finish + activateWait *sync.Cond + // error produced by activation + activateErr error + // keeps track of callback handlers run against this plugin + handlersRun bool +} + +// Name returns the name of the plugin. +func (p *Plugin) Name() string { + return p.name +} + +// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. +func (p *Plugin) Client() *Client { + return p.client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return true +} + +// NewLocalPlugin creates a new local plugin. +func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held.
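Before runHandlers below, the other side of the Plugin.Activate handshake is worth sketching: a tiny v1 plugin server that answers the activation request with a Manifest. The socket path is illustrative and normally requires root; the mimetype matches the VersionMimetype constant vendored later in this patch:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"os"
)

func main() {
	// Serve the activation handshake on a UNIX socket under
	// /run/docker/plugins, where discovery looks for sockets.
	const socket = "/run/docker/plugins/sample.sock"
	os.Remove(socket)
	l, err := net.Listen("unix", socket)
	if err != nil {
		panic(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		// The engine decodes this body into the Manifest struct above.
		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.2+json")
		fmt.Fprintln(w, `{"Implements": ["VolumeDriver"]}`)
	})

	panic(http.Serve(l, mux))
}
```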
+func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
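Before Handle below, a sketch of how Handle and Get work together; "sample" is a hypothetical plugin name, while "VolumeDriver" is a real subsystem name:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// Handle registers a callback that runs whenever a plugin advertising
	// the given subsystem is activated (see runHandlers above).
	plugins.Handle("VolumeDriver", func(name string, c *plugins.Client) {
		fmt.Printf("volume plugin %s activated\n", name)
	})

	// Get discovers and activates the named plugin (retrying with backoff)
	// and verifies that it implements the requested subsystem.
	p, err := plugins.Get("sample", "VolumeDriver")
	if err != nil {
		fmt.Println("lookup failed:", err) // e.g. plugins.ErrNotFound
		return
	}
	fmt.Println("found plugin:", p.Name())
}
```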
+func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 000000000000..02f1da69a1c1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For v1 plugins, this always returns the host's root directory. +func (p *Plugin) BasePath() string { + return "/" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 000000000000..3c8d8feb8397 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,8 @@ +package plugins + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Windows v1 plugins, this returns an empty string, since the plugin is already aware +// of the absolute path of the mount. +func (p *Plugin) BasePath() string { + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 000000000000..5be146af6574 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. 
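Before NewRequest below, a small sketch of the transport in isolation; the scheme and socket address are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/docker/docker/pkg/plugins/transport"
)

func main() {
	// NewHTTPTransport wraps any http.RoundTripper.
	t := transport.NewHTTPTransport(&http.Transport{}, "http", "/run/docker/plugins/sample.sock")

	// NewRequest prefixes the path with "/" if needed and fills in the
	// transport's scheme and host, producing a POST request that carries
	// the vnd.docker.plugins Accept header.
	req, err := t.NewRequest("VolumeDriver.Mount", strings.NewReader(`{"Name":"vol1"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.Path, req.Header.Get("Accept"))
}
```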
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 000000000000..d7f1e2100c43 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 000000000000..6658f69b69d7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 000000000000..34ae2a9dcdab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +// +build linux + +package reexec + +import ( + "os/exec" + "syscall" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. It also sets +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGTERM, + }, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 000000000000..778a720e3b91 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd solaris darwin + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary.
+// For example, if the current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 000000000000..76edd824273e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!solaris,!darwin + +package reexec + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin. +func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 000000000000..ca871c4227ed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package reexec + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example, if the current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 000000000000..c56671d91927 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. +func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md new file mode 100644 index 000000000000..d9cb367d109e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/README.md @@ -0,0 +1 @@ +This package provides helper functions to pack version information into a single User-Agent header.
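A runnable sketch of the reexec register/init/command round trip vendored above; "example-init" is an illustrative handler name, and the Pdeathsig behavior applies to the Linux build of Command:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// The registered name must match argv[0] of the re-executed process.
	reexec.Register("example-init", func() {
		fmt.Println("running re-executed initializer, pid", os.Getpid())
		os.Exit(0)
	})
}

func main() {
	// In the child, os.Args[0] is "example-init", so Init runs the
	// handler and returns true; the parent falls through.
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("example-init")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```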
diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 000000000000..1137db51b895 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends it to the base string. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is taken from the name field, while +// the version is taken from the version field. Several pieces of version information +// will be concatenated and separated by spaces. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". +func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 000000000000..74ff64080ab2 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,244 @@ +package v2 + +import ( + "fmt" + "strings" + "sync" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + PropagatedMount string // TODO: make private + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// BasePath returns the path to which all paths returned by the plugin are relative to. +// For Plugin objects this returns the host path of the plugin container's rootfs. +func (p *Plugin) BasePath() string { + return p.Rootfs +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient sets the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise.
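Before continuing with the v2 plugin accessors, a quick runnable check of the useragent package just vendored; the base string "docker/1.13.0" is illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/useragent"
)

func main() {
	// Entries with empty fields, or fields containing spaces, tabs,
	// newlines or "/", fail isValid above and are silently skipped.
	ua := useragent.AppendVersions("docker/1.13.0",
		useragent.VersionInfo{Name: "go", Version: "go1.7.5"},
		useragent.VersionInfo{Name: "bad name", Version: "1.0"}, // dropped
	)
	fmt.Println(ua) // docker/1.13.0 go/go1.7.5
}
```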
+func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap queries the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. +func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // it is, so let's update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ?
+ if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so let's update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. +func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count. +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count. +// This should only be called when the plugin is no longer in use, e.g. when the +// reference was taken via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire). +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 000000000000..412818ed3363 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,131 @@ +// +build linux + +package v2 + +import ( + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + s.Root = specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config?
+ } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.PropagatedMount != "" { + p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) + s.Linux.RootfsPropagation = "rshared" + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) 
+ + return &s, nil +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 000000000000..e60fb8311edb --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 000000000000..79c6befc24bf --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName returns name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there is only one settable, default to it.
+ set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go new file mode 100644 index 000000000000..8466e6e2cc99 --- /dev/null +++ b/vendor/github.com/docker/docker/reference/store.go @@ -0,0 +1,339 @@ +package reference + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" +) + +var ( + // ErrDoesNotExist is returned if a reference is not found in the + // store. + ErrDoesNotExist = errors.New("reference does not exist") +) + +// An Association is a tuple associating a reference with an image ID. +type Association struct { + Ref reference.Named + ID digest.Digest +} + +// Store provides the set of methods which can operate on a tag store. +type Store interface { + References(id digest.Digest) []reference.Named + ReferencesByName(ref reference.Named) []Association + AddTag(ref reference.Named, id digest.Digest, force bool) error + AddDigest(ref reference.Canonical, id digest.Digest, force bool) error + Delete(ref reference.Named) (bool, error) + Get(ref reference.Named) (digest.Digest, error) +} + +type store struct { + mu sync.RWMutex + // jsonPath is the path to the file where the serialized tag data is + // stored. + jsonPath string + // Repositories is a map of repositories, indexed by name. + Repositories map[string]repository + // referencesByIDCache is a cache of references indexed by ID, to speed + // up References. + referencesByIDCache map[digest.Digest]map[string]reference.Named +} + +// Repository maps tags to digests. The key is a stringified Reference, +// including the repository name. +type repository map[string]digest.Digest + +type lexicalRefs []reference.Named + +func (a lexicalRefs) Len() int { return len(a) } +func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalRefs) Less(i, j int) bool { + return a[i].String() < a[j].String() +} + +type lexicalAssociations []Association + +func (a lexicalAssociations) Len() int { return len(a) } +func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a lexicalAssociations) Less(i, j int) bool { + return a[i].Ref.String() < a[j].Ref.String() +} + +// NewReferenceStore creates a new reference store, tied to a file path where +// the set of references are serialized in JSON format. +func NewReferenceStore(jsonPath string) (Store, error) { + abspath, err := filepath.Abs(jsonPath) + if err != nil { + return nil, err + } + + store := &store{ + jsonPath: abspath, + Repositories: make(map[string]repository), + referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), + } + // Load the json file if it exists, otherwise create it. 
+ if err := store.reload(); os.IsNotExist(err) { + if err := store.save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +// AddTag adds a tag reference to the store. If force is set to true, existing +// references can be overwritten. This only works for tags, not digests. +func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error { + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + return store.addReference(reference.TagNameOnly(ref), id, force) +} + +// AddDigest adds a digest reference to the store. +func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return store.addReference(ref, id, force) +} + +func favorDigest(originalRef reference.Named) (reference.Named, error) { + ref := originalRef + // If the reference includes a digest and a tag, we must store only the + // digest. + canonical, isCanonical := originalRef.(reference.Canonical) + _, isNamedTagged := originalRef.(reference.NamedTagged) + + if isCanonical && isNamedTagged { + trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) + if err != nil { + // should never happen + return originalRef, err + } + ref = trimmed + } + return ref, nil +} + +func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { + ref, err := favorDigest(ref) + if err != nil { + return err + } + + refName := reference.FamiliarName(ref) + refStr := reference.FamiliarString(ref) + + if refName == string(digest.Canonical) { + return errors.New("refusing to create an ambiguous tag using digest algorithm as name") + } + + store.mu.Lock() + defer store.mu.Unlock() + + repository, exists := store.Repositories[refName] + if !exists || repository == nil { + repository = make(map[string]digest.Digest) + store.Repositories[refName] = repository + } + + oldID, exists := repository[refStr] + + if exists { + // force only works for tags + if digested, isDigest := ref.(reference.Canonical); isDigest { + return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) + } + + if !force { + return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", refStr, oldID.String()) + } + + if store.referencesByIDCache[oldID] != nil { + delete(store.referencesByIDCache[oldID], refStr) + if len(store.referencesByIDCache[oldID]) == 0 { + delete(store.referencesByIDCache, oldID) + } + } + } + + repository[refStr] = id + if store.referencesByIDCache[id] == nil { + store.referencesByIDCache[id] = make(map[string]reference.Named) + } + store.referencesByIDCache[id][refStr] = ref + + return store.save() +} + +// Delete deletes a reference from the store. It returns true if a deletion +// happened, or false otherwise. 
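Before Delete below, an end-to-end sketch of the reference store API; the JSON path is illustrative and the digest is a synthetic placeholder, not a real image ID:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	refstore "github.com/docker/docker/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	// NewReferenceStore creates the backing file on first save.
	store, err := refstore.NewReferenceStore("/tmp/repositories.json")
	if err != nil {
		panic(err)
	}

	ref, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	// Synthetic digest for illustration only.
	id := digest.Digest("sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9")

	// AddTag refuses digest references and, without force, refuses to
	// retarget an existing tag (see addReference above).
	if err := store.AddTag(ref, id, false); err != nil {
		panic(err)
	}

	got, err := store.Get(ref)
	fmt.Println(got, err)

	// Delete (below) reports whether a deletion actually happened.
	deleted, err := store.Delete(ref)
	fmt.Println(deleted, err)
}
```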
+func (store *store) Delete(ref reference.Named) (bool, error) {
+	ref, err := favorDigest(ref)
+	if err != nil {
+		return false, err
+	}
+
+	ref = reference.TagNameOnly(ref)
+
+	refName := reference.FamiliarName(ref)
+	refStr := reference.FamiliarString(ref)
+
+	store.mu.Lock()
+	defer store.mu.Unlock()
+
+	repository, exists := store.Repositories[refName]
+	if !exists {
+		return false, ErrDoesNotExist
+	}
+
+	if id, exists := repository[refStr]; exists {
+		delete(repository, refStr)
+		if len(repository) == 0 {
+			delete(store.Repositories, refName)
+		}
+		if store.referencesByIDCache[id] != nil {
+			delete(store.referencesByIDCache[id], refStr)
+			if len(store.referencesByIDCache[id]) == 0 {
+				delete(store.referencesByIDCache, id)
+			}
+		}
+		return true, store.save()
+	}
+
+	return false, ErrDoesNotExist
+}
+
+// Get retrieves an item from the store by reference
+func (store *store) Get(ref reference.Named) (digest.Digest, error) {
+	if canonical, ok := ref.(reference.Canonical); ok {
+		// If the reference contains both a tag and a digest, only
+		// look up by digest, as it takes precedence over the
+		// tag, until tag/digest combos are stored.
+		if _, ok := ref.(reference.Tagged); ok {
+			var err error
+			ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest())
+			if err != nil {
+				return "", err
+			}
+		}
+	} else {
+		ref = reference.TagNameOnly(ref)
+	}
+
+	refName := reference.FamiliarName(ref)
+	refStr := reference.FamiliarString(ref)
+
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	repository, exists := store.Repositories[refName]
+	if !exists || repository == nil {
+		return "", ErrDoesNotExist
+	}
+
+	id, exists := repository[refStr]
+	if !exists {
+		return "", ErrDoesNotExist
+	}
+
+	return id, nil
+}
+
+// References returns a slice of references to the given ID. The slice
+// will be nil if there are no references to this ID.
+func (store *store) References(id digest.Digest) []reference.Named {
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	// Convert the internal map to an array for two reasons:
+	// 1) We must not return a mutable object, and a slice is a copy.
+	// 2) It would be ugly to expose the extraneous map keys to callers.
+
+	var references []reference.Named
+	for _, ref := range store.referencesByIDCache[id] {
+		references = append(references, ref)
+	}
+
+	sort.Sort(lexicalRefs(references))
+
+	return references
+}
+
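+// A minimal usage sketch of the Store API above (the path, reference, and
+// digest are illustrative placeholders, not values this package defines):
+//
+//	refStore, err := NewReferenceStore("/var/lib/docker/image/repositories.json")
+//	if err != nil {
+//		// handle error
+//	}
+//	named, _ := reference.ParseNormalizedNamed("busybox:latest")
+//	id := digest.Digest("sha256:aaaa...") // hypothetical image ID
+//	_ = refStore.AddTag(named, id, false) // force=false refuses to move an existing tag
+//	resolved, _ := refStore.Get(named)    // resolved == id
+//	refs := refStore.References(id)       // all refs pointing at id, lexically sorted
+//	_ = refs
+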
+// ReferencesByName returns the references for a given repository name.
+// If there are no references known for this repository name,
+// ReferencesByName returns nil.
+func (store *store) ReferencesByName(ref reference.Named) []Association {
+	refName := reference.FamiliarName(ref)
+
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	repository, exists := store.Repositories[refName]
+	if !exists {
+		return nil
+	}
+
+	var associations []Association
+	for refStr, refID := range repository {
+		ref, err := reference.ParseNormalizedNamed(refStr)
+		if err != nil {
+			// Should never happen
+			return nil
+		}
+		associations = append(associations,
+			Association{
+				Ref: ref,
+				ID:  refID,
+			})
+	}
+
+	sort.Sort(lexicalAssociations(associations))
+
+	return associations
+}
+
+func (store *store) save() error {
+	// Store the json
+	jsonData, err := json.Marshal(store)
+	if err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600)
+}
+
+func (store *store) reload() error {
+	f, err := os.Open(store.jsonPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&store); err != nil {
+		return err
+	}
+
+	for _, repository := range store.Repositories {
+		for refStr, refID := range repository {
+			ref, err := reference.ParseNormalizedNamed(refStr)
+			if err != nil {
+				// Should never happen
+				continue
+			}
+			if store.referencesByIDCache[refID] == nil {
+				store.referencesByIDCache[refID] = make(map[string]reference.Named)
+			}
+			store.referencesByIDCache[refID][refStr] = ref
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
new file mode 100644
index 000000000000..c02b73e3e6a3
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
@@ -0,0 +1,102 @@
+// +build linux freebsd
+
+package devices
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var (
+	ErrNotADevice = errors.New("not a device node")
+)
+
+// Testing dependencies
+var (
+	osLstat       = os.Lstat
+	ioutilReadDir = ioutil.ReadDir
+)
+
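+// A short usage sketch (paths are illustrative; any regular file yields
+// ErrNotADevice):
+//
+//	dev, err := DeviceFromPath("/dev/null", "rwm")
+//	if err == nil {
+//		// dev.Major and dev.Minor hold the decoded device numbers
+//	}
+//	all, _ := HostDevices() // walks /dev, skipping pts, shm, fd, mqueue and console
+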
+// Given the path to a device and its cgroup_permissions (which cannot be
+// easily queried), look up the information about a linux device and return
+// that information as a Device struct.
+func DeviceFromPath(path, permissions string) (*configs.Device, error) {
+	fileInfo, err := osLstat(path)
+	if err != nil {
+		return nil, err
+	}
+	var (
+		devType                rune
+		mode                   = fileInfo.Mode()
+		fileModePermissionBits = os.FileMode.Perm(mode)
+	)
+	switch {
+	case mode&os.ModeDevice == 0:
+		return nil, ErrNotADevice
+	case mode&os.ModeCharDevice != 0:
+		fileModePermissionBits |= syscall.S_IFCHR
+		devType = 'c'
+	default:
+		fileModePermissionBits |= syscall.S_IFBLK
+		devType = 'b'
+	}
+	stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
+	if !ok {
+		return nil, fmt.Errorf("cannot determine the device number for device %s", path)
+	}
+	devNumber := int(stat_t.Rdev)
+	return &configs.Device{
+		Type:        devType,
+		Path:        path,
+		Major:       Major(devNumber),
+		Minor:       Minor(devNumber),
+		Permissions: permissions,
+		FileMode:    fileModePermissionBits,
+		Uid:         stat_t.Uid,
+		Gid:         stat_t.Gid,
+	}, nil
+}
+
+func HostDevices() ([]*configs.Device, error) {
+	return getDevices("/dev")
+}
+
+func getDevices(path string) ([]*configs.Device, error) {
+	files, err := ioutilReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+	out := []*configs.Device{}
+	for _, f := range files {
+		switch {
+		case f.IsDir():
+			switch f.Name() {
+			case "pts", "shm", "fd", "mqueue":
+				continue
+			default:
+				sub, err := getDevices(filepath.Join(path, f.Name()))
+				if err != nil {
+					return nil, err
+				}
+
+				out = append(out, sub...)
+				continue
+			}
+		case f.Name() == "console":
+			continue
+		}
+		device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
+		if err != nil {
+			if err == ErrNotADevice {
+				continue
+			}
+			return nil, err
+		}
+		out = append(out, device)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
new file mode 100644
index 000000000000..1e84033daff8
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
@@ -0,0 +1,3 @@
+// +build windows
+
+package devices
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
new file mode 100644
index 000000000000..885b6e5dd936
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
@@ -0,0 +1,24 @@
+// +build linux freebsd
+
+package devices
+
+/*
+
+This code provides support for manipulating linux device numbers. It should be
+replaced by normal syscall functions once
+http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device);
+and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h>, as
+the representation of device numbers used by go is different than the one used
+internally to the kernel!
- https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9 + +*/ + +func Major(devNumber int) int64 { + return int64((devNumber >> 8) & 0xfff) +} + +func Minor(devNumber int) int64 { + return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)) +} diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE new file mode 100644 index 000000000000..bdc403653e0a --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runtime-spec/README.md b/vendor/github.com/opencontainers/runtime-spec/README.md new file mode 100644 index 000000000000..1364f6fcb530 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/README.md @@ -0,0 +1,164 @@ +# Open Container Initiative Runtime Specification + +The [Open Container Initiative][oci] develops specifications for standards on Operating System process and application containers. + +The specification can be found [here](spec.md). + +## Table of Contents + +Additional documentation about how this group operates: + +- [Code of Conduct][code-of-conduct] +- [Style and Conventions](style.md) +- [Roadmap](ROADMAP.md) +- [Implementations](implementations.md) +- [Releases](RELEASES.md) +- [project](project.md) +- [charter][charter] + +## Use Cases + +To provide context for users the following section gives example use cases for each part of the spec. + +### Application Bundle Builders + +Application bundle builders can create a [bundle](bundle.md) directory that includes all of the files required for launching an application as a container. +The bundle contains an OCI [configuration file](config.md) where the builder can specify host-independent details such as [which executable to launch](config.md#process) and host-specific settings such as [mount](config.md#mounts) locations, [hook](config.md#hooks) paths, Linux [namespaces](config-linux.md#namespaces) and [cgroups](config-linux.md#control-groups). +Because the configuration includes host-specific settings, application bundle directories copied between two hosts may require configuration adjustments. + +### Hook Developers + +[Hook](config.md#hooks) developers can extend the functionality of an OCI-compliant runtime by hooking into a container's lifecycle with an external application. +Example use cases include sophisticated network configuration, volume garbage collection, etc. + +### Runtime Developers + +Runtime developers can build runtime implementations that run OCI-compliant bundles and container configuration, containing low-level OS and host specific details, on a particular platform. + +## Releases + +There is a loose [Road Map](./ROADMAP.md). +During the `0.x` series of OCI releases we make no backwards compatibility guarantees and intend to break the schema during this series. + +## Contributing + +Development happens on GitHub for the spec. +Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The specification and code is licensed under the Apache 2.0 license found in the [LICENSE](./LICENSE) file. + +### Discuss your design + +The project welcomes submissions, but please let everyone know what you are working on. + +Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. 
+This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. +It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. +When in doubt, start on the [mailing-list](#mailing-list). + +### Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting on Wednesdays at: + +* 8:00 AM (USA Pacific), during [odd weeks][iso-week]. +* 5:00 PM (USA Pacific), during [even weeks][iso-week]. + +There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics). + +Everyone is welcome to participate via [UberConference web][uberconference] or audio-only: +1 415 968 0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes], with minutes from especially old meetings (September 2015 and earlier) archived [here][runtime-wiki]. + +### Mailing List + +You can subscribe and join the mailing list on [Google Groups][dev-list]. + +### IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +### Git commit + +#### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from http://developercertificate.org): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +#### Commit Style + +Simple house-keeping for clean git history. 
+Read more on [How to Write a Git Commit Message][how-to-git-commit] or the Discussion section of [git-commit(1)][git-commit.1]. + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7. Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[charter]: https://www.opencontainers.org/about/governance +[code-of-conduct]: https://github.com/opencontainers/tob/blob/master/code-of-conduct.md +[dev-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev +[how-to-git-commit]: http://chris.beams.io/posts/git-commit +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[iso-week]: https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_of_a_given_date +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ +[oci]: https://www.opencontainers.org +[rfc5545]: https://tools.ietf.org/html/rfc5545 +[runtime-wiki]: https://github.com/opencontainers/runtime-spec/wiki +[uberconference]: https://www.uberconference.com/opencontainers + +[git-commit.1]: http://git-scm.com/docs/git-commit diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go new file mode 100644 index 000000000000..da9040594c04 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -0,0 +1,563 @@ +package specs + +import "os" + +// Spec is the base configuration for the container. +type Spec struct { + // Version of the Open Container Runtime Specification with which the bundle complies. + Version string `json:"ociVersion"` + // Platform specifies the configuration's target platform. + Platform Platform `json:"platform"` + // Process configures the container process. + Process Process `json:"process"` + // Root configures the container's root filesystem. + Root Root `json:"root"` + // Hostname configures the container's hostname. + Hostname string `json:"hostname,omitempty"` + // Mounts configures additional mounts (on top of Root). + Mounts []Mount `json:"mounts,omitempty"` + // Hooks configures callbacks for container lifecycle events. + Hooks *Hooks `json:"hooks,omitempty"` + // Annotations contains arbitrary metadata for the container. + Annotations map[string]string `json:"annotations,omitempty"` + + // Linux is platform specific configuration for Linux based containers. + Linux *Linux `json:"linux,omitempty" platform:"linux"` + // Solaris is platform specific configuration for Solaris containers. + Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"` + // Windows is platform specific configuration for Windows based containers, including Hyper-V containers. + Windows *Windows `json:"windows,omitempty" platform:"windows"` +} + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal,omitempty"` + // ConsoleSize specifies the size of the console. + ConsoleSize Box `json:"consoleSize,omitempty"` + // User specifies user information for the process. 
+ User User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd string `json:"cwd"` + // Capabilities are Linux capabilities that are kept for the process. + Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []LinuxRlimit `json:"rlimits,omitempty" platform:"linux"` + // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` +} + +// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process. +// http://man7.org/linux/man-pages/man7/capabilities.7.html +type LinuxCapabilities struct { + // Bounding is the set of capabilities checked by the kernel. + Bounding []string `json:"bounding,omitempty" platform:"linux"` + // Effective is the set of capabilities checked by the kernel. + Effective []string `json:"effective,omitempty" platform:"linux"` + // Inheritable is the capabilities preserved across execve. + Inheritable []string `json:"inheritable,omitempty" platform:"linux"` + // Permitted is the limiting superset for effective capabilities. + Permitted []string `json:"permitted,omitempty" platform:"linux"` + // Ambient is the ambient set of capabilities that are kept. + Ambient []string `json:"ambient,omitempty" platform:"linux"` +} + +// Box specifies dimensions of a rectangle. Used for specifying the size of a console. +type Box struct { + // Height is the vertical dimension of a box. + Height uint `json:"height"` + // Width is the horizontal dimension of a box. + Width uint `json:"width"` +} + +// User specifies specific user (and group) information for the container process. +type User struct { + // UID is the user id. + UID uint32 `json:"uid" platform:"linux,solaris"` + // GID is the group id. + GID uint32 `json:"gid" platform:"linux,solaris"` + // AdditionalGids are additional group ids set for the container's process. + AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` + // Username is the user name. + Username string `json:"username,omitempty" platform:"windows"` +} + +// Root contains information about the container's root filesystem on the host. +type Root struct { + // Path is the absolute path to the container's root filesystem. + Path string `json:"path"` + // Readonly makes the root filesystem for the container readonly before the process is executed. + Readonly bool `json:"readonly,omitempty"` +} + +// Platform specifies OS and arch information for the host system that the container +// is created for. +type Platform struct { + // OS is the operating system. + OS string `json:"os"` + // Arch is the architecture + Arch string `json:"arch"` +} + +// Mount specifies a mount for a container. +type Mount struct { + // Destination is the path where the mount will be placed relative to the container's root. 
The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point. + Destination string `json:"destination"` + // Type specifies the mount kind. + Type string `json:"type,omitempty"` + // Source specifies the source path of the mount. In the case of bind mounts on + // Linux based systems this would be the file on the host. + Source string `json:"source,omitempty"` + // Options are fstab style mount options. + Options []string `json:"options,omitempty"` +} + +// Hook specifies a command that is run at a particular event in the lifecycle of a container +type Hook struct { + Path string `json:"path"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` + Timeout *int `json:"timeout,omitempty"` +} + +// Hooks for container setup and teardown +type Hooks struct { + // Prestart is a list of hooks to be run before the container process is executed. + // On Linux, they are run after the container namespaces are created. + Prestart []Hook `json:"prestart,omitempty"` + // Poststart is a list of hooks to be run after the container process is started. + Poststart []Hook `json:"poststart,omitempty"` + // Poststop is a list of hooks to be run after the container process exits. + Poststop []Hook `json:"poststop,omitempty"` +} + +// Linux contains platform specific configuration for Linux based containers. +type Linux struct { + // UIDMapping specifies user mappings for supporting user namespaces on Linux. + UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"` + // GIDMapping specifies group mappings for supporting user namespaces on Linux. + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"` + // Sysctl are a set of key value pairs that are set for the container on start + Sysctl map[string]string `json:"sysctl,omitempty"` + // Resources contain cgroup information for handling resource constraints + // for the container + Resources *LinuxResources `json:"resources,omitempty"` + // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. + // The path is expected to be relative to the cgroups mountpoint. + // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. + CgroupsPath string `json:"cgroupsPath,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []LinuxNamespace `json:"namespaces,omitempty"` + // Devices are a list of device nodes that are created for the container + Devices []LinuxDevice `json:"devices,omitempty"` + // Seccomp specifies the seccomp security settings for the container. + Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` + // RootfsPropagation is the rootfs mount propagation mode for the container. + RootfsPropagation string `json:"rootfsPropagation,omitempty"` + // MaskedPaths masks over the provided paths inside the container. + MaskedPaths []string `json:"maskedPaths,omitempty"` + // ReadonlyPaths sets the provided paths as RO inside the container. + ReadonlyPaths []string `json:"readonlyPaths,omitempty"` + // MountLabel specifies the selinux context for the mounts in the container. 
+ MountLabel string `json:"mountLabel,omitempty"` + // IntelRdt contains Intel Resource Director Technology (RDT) information + // for handling resource constraints (e.g., L3 cache) for the container + IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` +} + +// LinuxNamespace is the configuration for a Linux namespace +type LinuxNamespace struct { + // Type is the type of Linux namespace + Type LinuxNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` +} + +// LinuxNamespaceType is one of the Linux namespaces +type LinuxNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + PIDNamespace LinuxNamespaceType = "pid" + // NetworkNamespace for isolating network devices, stacks, ports, etc + NetworkNamespace = "network" + // MountNamespace for isolating mount points + MountNamespace = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + IPCNamespace = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + UTSNamespace = "uts" + // UserNamespace for isolating user and group IDs + UserNamespace = "user" + // CgroupNamespace for isolating cgroup hierarchies + CgroupNamespace = "cgroup" +) + +// LinuxIDMapping specifies UID/GID mappings +type LinuxIDMapping struct { + // HostID is the starting UID/GID on the host to be mapped to 'ContainerID' + HostID uint32 `json:"hostID"` + // ContainerID is the starting UID/GID in the container + ContainerID uint32 `json:"containerID"` + // Size is the number of IDs to be mapped + Size uint32 `json:"size"` +} + +// LinuxRlimit type and restrictions +type LinuxRlimit struct { + // Type of the rlimit to set + Type string `json:"type"` + // Hard is the hard limit for the specified type + Hard uint64 `json:"hard"` + // Soft is the soft limit for the specified type + Soft uint64 `json:"soft"` +} + +// LinuxHugepageLimit structure corresponds to limiting kernel hugepages +type LinuxHugepageLimit struct { + // Pagesize is the hugepage size + Pagesize string `json:"pageSize"` + // Limit is the limit of "hugepagesize" hugetlb usage + Limit uint64 `json:"limit"` +} + +// LinuxInterfacePriority for network interfaces +type LinuxInterfacePriority struct { + // Name is the name of the network interface + Name string `json:"name"` + // Priority for the interface + Priority uint32 `json:"priority"` +} + +// linuxBlockIODevice holds major:minor format supported in blkio cgroup +type linuxBlockIODevice struct { + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` +} + +// LinuxWeightDevice struct holds a `major:minor weight` pair for blkioWeightDevice +type LinuxWeightDevice struct { + linuxBlockIODevice + // Weight is the bandwidth rate for the device. 
+ Weight *uint16 `json:"weight,omitempty"` + // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"leafWeight,omitempty"` +} + +// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair +type LinuxThrottleDevice struct { + linuxBlockIODevice + // Rate is the IO rate limit per cgroup per device + Rate uint64 `json:"rate"` +} + +// LinuxBlockIO for Linux cgroup 'blkio' resource management +type LinuxBlockIO struct { + // Specifies per cgroup weight + Weight *uint16 `json:"blkioWeight,omitempty"` + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"blkioLeafWeight,omitempty"` + // Weight per cgroup per device, can override BlkioWeight + WeightDevice []LinuxWeightDevice `json:"blkioWeightDevice,omitempty"` + // IO read rate limit per cgroup per device, bytes per second + ThrottleReadBpsDevice []LinuxThrottleDevice `json:"blkioThrottleReadBpsDevice,omitempty"` + // IO write rate limit per cgroup per device, bytes per second + ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"blkioThrottleWriteBpsDevice,omitempty"` + // IO read rate limit per cgroup per device, IO per second + ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"blkioThrottleReadIOPSDevice,omitempty"` + // IO write rate limit per cgroup per device, IO per second + ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"blkioThrottleWriteIOPSDevice,omitempty"` +} + +// LinuxMemory for Linux cgroup 'memory' resource management +type LinuxMemory struct { + // Memory limit (in bytes). + Limit *uint64 `json:"limit,omitempty"` + // Memory reservation or soft_limit (in bytes). + Reservation *uint64 `json:"reservation,omitempty"` + // Total memory limit (memory + swap). + Swap *uint64 `json:"swap,omitempty"` + // Kernel memory limit (in bytes). + Kernel *uint64 `json:"kernel,omitempty"` + // Kernel memory limit for tcp (in bytes) + KernelTCP *uint64 `json:"kernelTCP,omitempty"` + // How aggressive the kernel will swap memory pages. Range from 0 to 100. + Swappiness *uint64 `json:"swappiness,omitempty"` +} + +// LinuxCPU for Linux cgroup 'cpu' resource management +type LinuxCPU struct { + // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). + Shares *uint64 `json:"shares,omitempty"` + // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + Quota *int64 `json:"quota,omitempty"` + // CPU period to be used for hardcapping (in usecs). + Period *uint64 `json:"period,omitempty"` + // How much time realtime scheduling may use (in usecs). + RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"` + // CPU period to be used for realtime scheduling (in usecs). + RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"` + // CPUs to use within the cpuset. Default is to use any CPU available. + Cpus string `json:"cpus,omitempty"` + // List of memory nodes in the cpuset. Default is to use any available memory node. + Mems string `json:"mems,omitempty"` +} + +// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) +type LinuxPids struct { + // Maximum number of PIDs. Default is "no limit". 
+ Limit int64 `json:"limit"` +} + +// LinuxNetwork identification and priority configuration +type LinuxNetwork struct { + // Set class identifier for container's network packets + ClassID *uint32 `json:"classID,omitempty"` + // Set priority of network traffic for container + Priorities []LinuxInterfacePriority `json:"priorities,omitempty"` +} + +// LinuxResources has container runtime resource constraints +type LinuxResources struct { + // Devices configures the device whitelist. + Devices []LinuxDeviceCgroup `json:"devices,omitempty"` + // DisableOOMKiller disables the OOM killer for out of memory conditions + DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` + // Specify an oom_score_adj for the container. + OOMScoreAdj *int `json:"oomScoreAdj,omitempty"` + // Memory restriction configuration + Memory *LinuxMemory `json:"memory,omitempty"` + // CPU resource restriction configuration + CPU *LinuxCPU `json:"cpu,omitempty"` + // Task resource restriction configuration. + Pids *LinuxPids `json:"pids,omitempty"` + // BlockIO restriction configuration + BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` + // Hugetlb limit (in bytes) + HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` + // Network restriction configuration + Network *LinuxNetwork `json:"network,omitempty"` +} + +// LinuxDevice represents the mknod information for a Linux special device file +type LinuxDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` +} + +// LinuxDeviceCgroup represents a device rule for the whitelist controller +type LinuxDeviceCgroup struct { + // Allow or deny + Allow bool `json:"allow"` + // Device type, block, char, etc. + Type string `json:"type,omitempty"` + // Major is the device's major number. + Major *int64 `json:"major,omitempty"` + // Minor is the device's minor number. + Minor *int64 `json:"minor,omitempty"` + // Cgroup access permissions format, rwm. + Access string `json:"access,omitempty"` +} + +// Solaris contains platform specific configuration for Solaris application containers. +type Solaris struct { + // SMF FMRI which should go "online" before we start the container process. + Milestone string `json:"milestone,omitempty"` + // Maximum set of privileges any process in this container can obtain. + LimitPriv string `json:"limitpriv,omitempty"` + // The maximum amount of shared memory allowed for this container. + MaxShmMemory string `json:"maxShmMemory,omitempty"` + // Specification for automatic creation of network resources for this container. + Anet []SolarisAnet `json:"anet,omitempty"` + // Set limit on the amount of CPU time that can be used by container. + CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"` + // The physical and swap caps on the memory that can be used by this container. + CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"` +} + +// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container. 
+type SolarisCappedCPU struct { + Ncpus string `json:"ncpus,omitempty"` +} + +// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container. +type SolarisCappedMemory struct { + Physical string `json:"physical,omitempty"` + Swap string `json:"swap,omitempty"` +} + +// SolarisAnet provides the specification for automatic creation of network resources for this container. +type SolarisAnet struct { + // Specify a name for the automatically created VNIC datalink. + Linkname string `json:"linkname,omitempty"` + // Specify the link over which the VNIC will be created. + Lowerlink string `json:"lowerLink,omitempty"` + // The set of IP addresses that the container can use. + Allowedaddr string `json:"allowedAddress,omitempty"` + // Specifies whether allowedAddress limitation is to be applied to the VNIC. + Configallowedaddr string `json:"configureAllowedAddress,omitempty"` + // The value of the optional default router. + Defrouter string `json:"defrouter,omitempty"` + // Enable one or more types of link protection. + Linkprotection string `json:"linkProtection,omitempty"` + // Set the VNIC's macAddress + Macaddress string `json:"macAddress,omitempty"` +} + +// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers. +type Windows struct { + // Resources contains information for handling resource constraints for the container. + Resources *WindowsResources `json:"resources,omitempty"` +} + +// WindowsResources has container runtime resource constraints for containers running on Windows. +type WindowsResources struct { + // Memory restriction configuration. + Memory *WindowsMemoryResources `json:"memory,omitempty"` + // CPU resource restriction configuration. + CPU *WindowsCPUResources `json:"cpu,omitempty"` + // Storage restriction configuration. + Storage *WindowsStorageResources `json:"storage,omitempty"` + // Network restriction configuration. + Network *WindowsNetworkResources `json:"network,omitempty"` +} + +// WindowsMemoryResources contains memory resource management settings. +type WindowsMemoryResources struct { + // Memory limit in bytes. + Limit *uint64 `json:"limit,omitempty"` + // Memory reservation in bytes. + Reservation *uint64 `json:"reservation,omitempty"` +} + +// WindowsCPUResources contains CPU resource management settings. +type WindowsCPUResources struct { + // Number of CPUs available to the container. + Count *uint64 `json:"count,omitempty"` + // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. + Shares *uint16 `json:"shares,omitempty"` + // Specifies the portion of processor cycles that this container can use as a percentage times 100. + Maximum *uint16 `json:"maximum,omitempty"` +} + +// WindowsStorageResources contains storage resource management settings. +type WindowsStorageResources struct { + // Specifies maximum Iops for the system drive. + Iops *uint64 `json:"iops,omitempty"` + // Specifies maximum bytes per second for the system drive. + Bps *uint64 `json:"bps,omitempty"` + // Sandbox size specifies the minimum size of the system drive in bytes. + SandboxSize *uint64 `json:"sandboxSize,omitempty"` +} + +// WindowsNetworkResources contains network resource management settings. +type WindowsNetworkResources struct { + // EgressBandwidth is the maximum egress bandwidth in bytes per second. 
+	EgressBandwidth *uint64 `json:"egressBandwidth,omitempty"`
+}
+
+// LinuxSeccomp represents syscall restrictions
+type LinuxSeccomp struct {
+	DefaultAction LinuxSeccompAction `json:"defaultAction"`
+	Architectures []Arch             `json:"architectures,omitempty"`
+	Syscalls      []LinuxSyscall     `json:"syscalls,omitempty"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+	ArchX86         Arch = "SCMP_ARCH_X86"
+	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
+	ArchX32         Arch = "SCMP_ARCH_X32"
+	ArchARM         Arch = "SCMP_ARCH_ARM"
+	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
+	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
+	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
+	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
+	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
+	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
+	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+	ArchPPC         Arch = "SCMP_ARCH_PPC"
+	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
+	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
+	ArchS390        Arch = "SCMP_ARCH_S390"
+	ArchS390X       Arch = "SCMP_ARCH_S390X"
+	ArchPARISC      Arch = "SCMP_ARCH_PARISC"
+	ArchPARISC64    Arch = "SCMP_ARCH_PARISC64"
+)
+
+// LinuxSeccompAction taken upon Seccomp rule match
+type LinuxSeccompAction string
+
+// Define actions for Seccomp rules
+const (
+	ActKill  LinuxSeccompAction = "SCMP_ACT_KILL"
+	ActTrap  LinuxSeccompAction = "SCMP_ACT_TRAP"
+	ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO"
+	ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE"
+	ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW"
+)
+
+// LinuxSeccompOperator used to match syscall arguments in Seccomp
+type LinuxSeccompOperator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+	OpNotEqual     LinuxSeccompOperator = "SCMP_CMP_NE"
+	OpLessThan     LinuxSeccompOperator = "SCMP_CMP_LT"
+	OpLessEqual    LinuxSeccompOperator = "SCMP_CMP_LE"
+	OpEqualTo      LinuxSeccompOperator = "SCMP_CMP_EQ"
+	OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE"
+	OpGreaterThan  LinuxSeccompOperator = "SCMP_CMP_GT"
+	OpMaskedEqual  LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ"
+)
+
+// LinuxSeccompArg used for matching specific syscall arguments in Seccomp
+type LinuxSeccompArg struct {
+	Index    uint                 `json:"index"`
+	Value    uint64               `json:"value"`
+	ValueTwo uint64               `json:"valueTwo"`
+	Op       LinuxSeccompOperator `json:"op"`
+}
+
+// LinuxSyscall is used to match a syscall in Seccomp
+type LinuxSyscall struct {
+	Names  []string           `json:"names"`
+	Action LinuxSeccompAction `json:"action"`
+	Args   []LinuxSeccompArg  `json:"args,omitempty"`
+}
+
+// LinuxIntelRdt has container runtime resource constraints
+// for Intel RDT/CAT, which was introduced in the Linux 4.10 kernel
+type LinuxIntelRdt struct {
+	// The schema for L3 cache id and capacity bitmask (CBM)
+	// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+	L3CacheSchema string `json:"l3CacheSchema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
new file mode 100644
index 000000000000..b5dd3bee8bed
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -0,0 +1,17 @@
+package specs
+
+// State holds information about the runtime state of the container.
+type State struct {
+	// Version is the version of the specification that is supported.
+	Version string `json:"ociVersion"`
+	// ID is the container ID
+	ID string `json:"id"`
+	// Status is the runtime status of the container.
+ Status string `json:"status"` + // Pid is the process ID for the container process. + Pid int `json:"pid"` + // Bundle is the path to the container's bundle directory. + Bundle string `json:"bundle"` + // Annotations are key values associated with the container. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go new file mode 100644 index 000000000000..dfcf0090eedf --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -0,0 +1,18 @@ +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 0 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "-rc5-dev" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE new file mode 100644 index 000000000000..ca03685b1586 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vbatts/tar-split/README.md b/vendor/github.com/vbatts/tar-split/README.md new file mode 100644 index 000000000000..4c544d823fbc --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/README.md @@ -0,0 +1,137 @@ +# tar-split + +[![Build Status](https://travis-ci.org/vbatts/tar-split.svg?branch=master)](https://travis-ci.org/vbatts/tar-split) + +Pristinely disassembling a tar archive, and stashing needed raw bytes and offsets to reassemble a validating original archive. 
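+
+As a rough sketch of the library flow (the `tar/asm` and `tar/storage` calls
+below follow the godoc links in the next section; the file names are
+placeholders), disassembly packs every raw header and padding byte into a
+side-channel while the tar stream itself passes through unchanged:
+
+```go
+package main
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+func main() {
+	archive, err := os.Open("tar-split.tar") // placeholder archive name
+	if err != nil {
+		panic(err)
+	}
+	defer archive.Close()
+
+	meta, err := os.Create("tar-data.json") // raw headers/padding land here
+	if err != nil {
+		panic(err)
+	}
+	defer meta.Close()
+
+	// Wrap the archive: metadata is packed into meta while the tar
+	// stream passes through unchanged for normal consumption.
+	its, err := asm.NewInputTarStream(archive, storage.NewJSONPacker(meta), storage.NewDiscardFilePutter())
+	if err != nil {
+		panic(err)
+	}
+	// Extract or otherwise consume the stream; here we just drain it.
+	if _, err := io.Copy(ioutil.Discard, its); err != nil {
+		panic(err)
+	}
+}
+```
+
+Reassembly is the inverse: `asm.NewOutputTarStream` combines a
+`storage.FileGetter` for the file payloads with a `storage.NewJSONUnpacker`
+over the saved metadata to reproduce a byte-identical archive.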
+
+## Docs
+
+Code API for libraries provided by `tar-split`:
+
+* https://godoc.org/github.com/vbatts/tar-split/tar/asm
+* https://godoc.org/github.com/vbatts/tar-split/tar/storage
+* https://godoc.org/github.com/vbatts/tar-split/archive/tar
+
+## Install
+
+The command line utility is installable via:
+
+```bash
+go get github.com/vbatts/tar-split/cmd/tar-split
+```
+
+## Usage
+
+For cli usage, see its [README.md](cmd/tar-split/README.md).
+For the library, see the [docs](#docs).
+
+## Demo
+
+### Basic disassembly and assembly
+
+This demonstrates the `tar-split` command and how to assemble a tar archive from the `tar-data.json.gz`.
+
+
+![basic cmd demo thumbnail](https://i.ytimg.com/vi/vh5wyjIOBtc/2.jpg?time=1445027151805)
+[youtube video of basic command demo](https://youtu.be/vh5wyjIOBtc)
+
+### Docker layer preservation
+
+This demonstrates the tar-split integration for docker-1.8, providing consistent tar archives for the image layer content.
+
+![docker tar-split demo](https://i.ytimg.com/vi_webp/vh5wyjIOBtc/default.webp)
+[youtube video of docker layer checksums](https://youtu.be/tV_Dia8E8xw)
+
+## Caveat
+
+Eventually this should detect TARs for which this is not possible.
+
+For example, stored sparse files that have "holes" in them will be read as a
+contiguous file, though the archive contents may be recorded in sparse format.
+Therefore, when adding the file payload to a reassembled tar, to achieve
+identical output, the file payload would need to be precisely re-sparsified. This
+is not something I seek to fix immediately, but would rather have an alert that
+precise reassembly is not possible.
+(see more http://www.gnu.org/software/tar/manual/html_node/Sparse-Formats.html)
+
+
+Another caveat: while tar archives support having multiple file entries for the
+same path, we will not support this feature. If there is more than one entry
+with the same path, expect an err (like `ErrDuplicatePath`) or a resulting tar
+stream that does not validate your original checksum/signature.
+
+## Contract
+
+Do not break the API of stdlib `archive/tar` in our fork (ideally find an upstream mergeable solution).
+
+## Std Version
+
+The version of golang stdlib `archive/tar` is from go1.6.
+It is minimally extended to expose the raw bytes of the TAR, rather than just the marshalled headers and file stream.
+
+
+## Design
+
+See the [design](concept/DESIGN.md).
+
+## Stored Metadata
+
+Since the raw bytes of the headers and padding are stored, you may be wondering
+what the size implications are. The headers are at least 512 bytes per
+file (sometimes more), at least 1024 null bytes on the end, and then various
+padding. This makes for a constant linear growth in the stored metadata, with a
+naive storage implementation.
+
+First we'll get an archive to work with. For repeatability, we'll make an
+archive from what you've just cloned:
+
+```bash
+git archive --format=tar -o tar-split.tar HEAD .
+```
+
+```bash
+$ go get github.com/vbatts/tar-split/cmd/tar-split
+$ tar-split checksize ./tar-split.tar
+inspecting "tar-split.tar" (size 210k)
+ -- number of files: 50
+ -- size of metadata uncompressed: 53k
+ -- size of gzip compressed metadata: 3k
+```
+
+So, assuming you've managed the extraction of the archive yourself and can
+reuse the file payloads from a relative path, the only additional storage
+implication is as little as 3kb.
+
+But let's look at a larger archive, with many files.
+
+```bash
+$ ls -sh ./d.tar
+1.4G ./d.tar
+$ tar-split checksize ~/d.tar
+inspecting "/home/vbatts/d.tar" (size 1420749k)
+ -- number of files: 38718
+ -- size of metadata uncompressed: 43261k
+ -- size of gzip compressed metadata: 2251k
+```
+
+Here, an archive with 38,718 files has a compressed metadata footprint of about 2mb.
+
+Folding the null bytes on the end of the archive into the total, we can assume
+a rough bytes-per-file rate for the storage implications:
+
+| uncompressed | compressed |
+| :----------: | :--------: |
+| ~ 1kb per file | ~ 0.06kb per file |
+
+
+## What's Next?
+
+* More implementations of storage Packer and Unpacker
+* More implementations of FileGetter and FilePutter
+* It would be interesting to have an assembler stream that implements `io.Seeker`
+
+
+## License
+
+See [LICENSE](LICENSE)
+
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
new file mode 100644
index 000000000000..36f4e2398093
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -0,0 +1,340 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.

+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
+//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar

+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"time"
+)

+const (
+	blockSize = 512

+	// Types
+	TypeReg           = '0'    // regular file
+	TypeRegA          = '\x00' // regular file
+	TypeLink          = '1'    // hard link
+	TypeSymlink       = '2'    // symbolic link
+	TypeChar          = '3'    // character device node
+	TypeBlock         = '4'    // block device node
+	TypeDir           = '5'    // directory
+	TypeFifo          = '6'    // fifo node
+	TypeCont          = '7'    // reserved
+	TypeXHeader       = 'x'    // extended header
+	TypeXGlobalHeader = 'g'    // global extended header
+	TypeGNULongName   = 'L'    // Next file has a long name
+	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
+	TypeGNUSparse     = 'S'    // sparse file
+)

+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+	Name       string    // name of header file entry
+	Mode       int64     // permission and mode bits
+	Uid        int       // user id of owner
+	Gid        int       // group id of owner
+	Size       int64     // length in bytes
+	ModTime    time.Time // modified time
+	Typeflag   byte      // type of header entry
+	Linkname   string    // target name of link
+	Uname      string    // user name of owner
+	Gname      string    // group name of owner
+	Devmajor   int64     // major number of character or block device
+	Devminor   int64     // minor number of character or block device
+	AccessTime time.Time // access time
+	ChangeTime time.Time // status change time
+	Xattrs     map[string]string
+}

+// File name constants from the tar spec.
+const (
+	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
+	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)

+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+	return headerFileInfo{h}
+}

+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. + m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +var zeroBlock = make([]byte, blockSize) + +// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. +// We compute and return both. +func checksum(header []byte) (unsigned int64, signed int64) { + for i := 0; i < len(header); i++ { + if i == 148 { + // The chksum field (header[148:156]) is special: it should be treated as space bytes. + unsigned += ' ' * 8 + signed += ' ' * 8 + i += 7 + continue + } + unsigned += int64(header[i]) + signed += int64(int8(header[i])) + } + return +} + +type slicer []byte + +func (sp *slicer) next(n int) (b []byte) { + s := *sp + b, *sp = s[0:n], s[n:] + return +} + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go new file mode 100644 index 000000000000..adf32122e1b9 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -0,0 +1,1064 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package tar

+// TODO(dsymonds):
+// - pax extensions

+import (
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)

+var (
+	ErrHeader = errors.New("archive/tar: invalid tar header")
+)

+const maxNanoSecondIntSize = 9

+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+	r       io.Reader
+	err     error
+	pad     int64           // amount of padding (ignored) after current file entry
+	curr    numBytesReader  // reader for current file entry
+	hdrBuff [blockSize]byte // buffer to use in readHeader

+	RawAccounting bool          // Whether to enable the access needed to reassemble the tar from raw bytes. This incurs some performance/memory cost.
+	rawBytes      *bytes.Buffer // last raw bits
+}

+type parser struct {
+	err error // Last error seen
+}

+// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
+// This includes the header and padding.
+//
+// This call resets the current raw bytes buffer.
+//
+// It returns nil unless RawAccounting is enabled.
+func (tr *Reader) RawBytes() []byte {
+	if !tr.RawAccounting {
+		return nil
+	}
+	if tr.rawBytes == nil {
+		tr.rawBytes = bytes.NewBuffer(nil)
+	}
+	// if we've read them, then flush them.
+	defer tr.rawBytes.Reset()
+	return tr.rawBytes.Bytes()
+}

+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+	io.Reader
+	numBytes() int64
+}

+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+	r  io.Reader // underlying reader
+	nb int64     // number of unread bytes for current file entry
+}

+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+	rfr   numBytesReader // Reads the sparse-encoded file data
+	sp    []sparseEntry  // The sparse map for the file
+	pos   int64          // Keeps track of file position
+	total int64          // Total size of the file
+}

+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntrys.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 8-byte data:
+//	var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var sp = []sparseEntry{
+//		{offset: 2, numBytes: 5} // Data fragment for [2..6]
+//		{offset: 18, numBytes: 3} // Data fragment for [18..20]
+//	}
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+	offset   int64 // Starting position of the fragment
+	numBytes int64 // Length of the fragment
+}

+// Keywords for GNU sparse files in a PAX extended header
+const (
+	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+	paxGNUSparseOffset    = "GNU.sparse.offset"
+	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
+	paxGNUSparseMap       = "GNU.sparse.map"
+	paxGNUSparseName      = "GNU.sparse.name"
+	paxGNUSparseMajor     = "GNU.sparse.major"
+	paxGNUSparseMinor     = "GNU.sparse.minor"
+	paxGNUSparseSize      = "GNU.sparse.size"
+	paxGNUSparseRealSize  = "GNU.sparse.realsize"
+)

+// Keywords for old GNU sparse headers
+const (
+	oldGNUSparseMainHeaderOffset               = 386
+	oldGNUSparseMainHeaderIsExtendedOffset     = 482
+	oldGNUSparseMainHeaderNumEntries           = 4
+	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+	oldGNUSparseExtendedHeaderNumEntries       = 21
+	oldGNUSparseOffsetSize                     = 12
+	oldGNUSparseNumBytesSize                   = 12
+)

+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }

+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+	if tr.RawAccounting {
+		if tr.rawBytes == nil {
+			tr.rawBytes = bytes.NewBuffer(nil)
+		} else {
+			tr.rawBytes.Reset()
+		}
+	}

+	if tr.err != nil {
+		return nil, tr.err
+	}

+	var hdr *Header
+	var extHdrs map[string]string

+	// Externally, Next iterates through the tar archive as if it is a series of
+	// files. Internally, the tar format often uses fake "files" to add meta
+	// data that describes the next file. These meta data "files" should not
+	// normally be visible to the outside. As such, this loop iterates through
+	// one or more "header files" until it finds a "normal file".
+loop:
+	for {
+		tr.err = tr.skipUnread()
+		if tr.err != nil {
+			return nil, tr.err
+		}

+		hdr = tr.readHeader()
+		if tr.err != nil {
+			return nil, tr.err
+		}
+		// Check for PAX/GNU special headers and files.
+		switch hdr.Typeflag {
+		case TypeXHeader:
+			extHdrs, tr.err = parsePAX(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}
+			continue loop // This is a meta header affecting the next header
+		case TypeGNULongName, TypeGNULongLink:
+			var realname []byte
+			realname, tr.err = ioutil.ReadAll(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}

+			if tr.RawAccounting {
+				if _, tr.err = tr.rawBytes.Write(realname); tr.err != nil {
+					return nil, tr.err
+				}
+			}

+			// Convert GNU extensions to use PAX headers.
+ if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + tr.err = p.err + return nil, tr.err + } + continue loop // This is a meta header affecting the next header + default: + mergePAX(hdr, extHdrs) + + // Check for a PAX format sparse file + sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + tr.err = err + return nil, err + } + if sp != nil { + // Current file is a PAX format GNU sparse file. + // Set the current file reader to a sparse file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil, tr.err + } + } + break loop // This is a file, so stop + } + } + return hdr, nil +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
+func mergePAX(hdr *Header, headers map[string]string) error { + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxGname: + hdr.Gname = v + case paxUname: + hdr.Uname = v + case paxUid: + uid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Uid = int(uid) + case paxGid: + gid, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Gid = int(gid) + case paxAtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.AccessTime = t + case paxMtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ModTime = t + case paxCtime: + t, err := parsePAXTime(v) + if err != nil { + return err + } + hdr.ChangeTime = t + case paxSize: + size, err := strconv.ParseInt(v, 10, 0) + if err != nil { + return err + } + hdr.Size = int64(size) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + } + return nil +} + +// parsePAXTime takes a string of the form %d.%d as described in +// the PAX specification. +func parsePAXTime(t string) (time.Time, error) { + buf := []byte(t) + pos := bytes.IndexByte(buf, '.') + var seconds, nanoseconds int64 + var err error + if pos == -1 { + seconds, err = strconv.ParseInt(t, 10, 0) + if err != nil { + return time.Time{}, err + } + } else { + seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) + if err != nil { + return time.Time{}, err + } + nano_buf := string(buf[pos+1:]) + // Pad as needed before converting to a decimal. + // For example .030 -> .030000000 -> 30000000 nanoseconds + if len(nano_buf) < maxNanoSecondIntSize { + // Right pad + nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) + } else if len(nano_buf) > maxNanoSecondIntSize { + // Right truncate + nano_buf = nano_buf[:maxNanoSecondIntSize] + } + nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) + if err != nil { + return time.Time{}, err + } + } + ts := time.Unix(seconds, nanoseconds) + return ts, nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. + var sparseMap bytes.Buffer + + headers := make(map[string]string) + // Each record is constructed as + // "%d %s=%s\n", length, keyword, value + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + keyStr := string(key) + if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { + // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. + sparseMap.WriteString(value) + sparseMap.Write([]byte{','}) + } else { + // Normal key. Set the value in the headers map. 
+ headers[keyStr] = string(value) + } + } + if sparseMap.Len() != 0 { + // Add sparse info to headers, chopping off the extra comma + sparseMap.Truncate(sparseMap.Len() - 1) + headers[paxGNUSparseMap] = sparseMap.String() + } + return headers, nil +} + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. + var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. 
+// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + if tr.RawAccounting { + _, tr.err = io.CopyN(tr.rawBytes, tr.r, totalSkip) + return tr.err + } + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, os.SEEK_CUR) + if err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) + if err != nil { + tr.err = err + return tr.err + } + seekSkipped = pos2 - pos1 + } + } + + var copySkipped int64 // Number of bytes skipped via CopyN + copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { + tr.err = io.ErrUnexpectedEOF + } + return tr.err +} + +func (tr *Reader) verifyChecksum(header []byte) bool { + if tr.err != nil { + return false + } + + var p parser + given := p.parseOctal(header[148:156]) + unsigned, signed := checksum(header) + return p.err == nil && (given == unsigned || given == signed) +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() *Header { + header := tr.hdrBuff[:] + copy(header, zeroBlock) + + if n, err := io.ReadFull(tr.r, header); err != nil { + tr.err = err + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, err := tr.rawBytes.Write(header[:n]); err != nil { + tr.err = err + } + } + return nil // io.EOF is okay here + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + + // Two blocks of zero bytes marks the end of the archive. 
+ if bytes.Equal(header, zeroBlock[0:blockSize]) { + if n, err := io.ReadFull(tr.r, header); err != nil { + tr.err = err + // because it could read some of the block, but reach EOF first + if tr.err == io.EOF && tr.RawAccounting { + if _, err := tr.rawBytes.Write(header[:n]); err != nil { + tr.err = err + } + } + return nil // io.EOF is okay here + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(header); tr.err != nil { + return nil + } + } + if bytes.Equal(header, zeroBlock[0:blockSize]) { + tr.err = io.EOF + } else { + tr.err = ErrHeader // zero block and then non-zero block + } + return nil + } + + if !tr.verifyChecksum(header) { + tr.err = ErrHeader + return nil + } + + // Unpack + var p parser + hdr := new(Header) + s := slicer(header) + + hdr.Name = p.parseString(s.next(100)) + hdr.Mode = p.parseNumeric(s.next(8)) + hdr.Uid = int(p.parseNumeric(s.next(8))) + hdr.Gid = int(p.parseNumeric(s.next(8))) + hdr.Size = p.parseNumeric(s.next(12)) + hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) + s.next(8) // chksum + hdr.Typeflag = s.next(1)[0] + hdr.Linkname = p.parseString(s.next(100)) + + // The remainder of the header depends on the value of magic. + // The original (v7) version of tar had no explicit magic field, + // so its magic bytes, like the rest of the block, are NULs. + magic := string(s.next(8)) // contains version field as well. + var format string + switch { + case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) + if string(header[508:512]) == "tar\x00" { + format = "star" + } else { + format = "posix" + } + case magic == "ustar \x00": // old GNU tar + format = "gnu" + } + + switch format { + case "posix", "gnu", "star": + hdr.Uname = p.parseString(s.next(32)) + hdr.Gname = p.parseString(s.next(32)) + devmajor := s.next(8) + devminor := s.next(8) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = p.parseNumeric(devmajor) + hdr.Devminor = p.parseNumeric(devminor) + } + var prefix string + switch format { + case "posix", "gnu": + prefix = p.parseString(s.next(155)) + case "star": + prefix = p.parseString(s.next(131)) + hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + + if p.err != nil { + tr.err = p.err + return nil + } + + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + tr.err = ErrHeader + return nil + } + + // Set the current file reader. + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.curr = ®FileReader{r: tr.r, nb: nb} + + // Check for old GNU sparse format entry. + if hdr.Typeflag == TypeGNUSparse { + // Get the real size of the file. + hdr.Size = p.parseNumeric(header[483:495]) + if p.err != nil { + tr.err = p.err + return nil + } + + // Read the sparse map. + sp := tr.readOldGNUSparseMap(header) + if tr.err != nil { + return nil + } + + // Current file is a GNU sparse file. Update the current file reader. + tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) + if tr.err != nil { + return nil + } + } + + return hdr +} + +// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, +// then one or more extension headers are used to store the rest of the sparse map. 
+func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { + var p parser + isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 + spCap := oldGNUSparseMainHeaderNumEntries + if isExtended { + spCap += oldGNUSparseExtendedHeaderNumEntries + } + sp := make([]sparseEntry, 0, spCap) + s := slicer(header[oldGNUSparseMainHeaderOffset:]) + + // Read the four entries from the main tar header + for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + for isExtended { + // There are more entries. Read an extension header and parse its entries. + sparseHeader := make([]byte, blockSize) + if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { + return nil + } + if tr.RawAccounting { + if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil { + return nil + } + } + + isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 + s = slicer(sparseHeader) + for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { + offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) + numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) + if p.err != nil { + tr.err = p.err + return nil + } + if offset == 0 && numBytes == 0 { + break + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + } + return sp +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. 
+ // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (n int, err error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err = tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. +func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. 
These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. +func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. + if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. + // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go new file mode 100644 index 000000000000..cf9cc79c5915 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go new file mode 100644 index 000000000000..6f17dbe30725 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go new file mode 100644 index 000000000000..cb843db4cfd6 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go new file mode 100644 index 000000000000..042638175cd8 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go @@ -0,0 +1,416 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. 
+type Writer struct {
+	w          io.Writer
+	err        error
+	nb         int64 // number of unwritten bytes for current file entry
+	pad        int64 // amount of padding to write after current file entry
+	closed     bool
+	usedBinary bool            // whether the binary numeric field extension was used
+	preferPax  bool            // use pax header instead of binary numeric header
+	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
+	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
+}

+type formatter struct {
+	err error // Last error seen
+}

+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }

+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+	if tw.nb > 0 {
+		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+		return tw.err
+	}

+	n := tw.nb + tw.pad
+	for n > 0 && tw.err == nil {
+		nr := n
+		if nr > blockSize {
+			nr = blockSize
+		}
+		var nw int
+		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+		n -= int64(nw)
+	}
+	tw.nb = 0
+	tw.pad = 0
+	return tw.err
+}

+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+	if len(s) > len(b) {
+		f.err = ErrFieldTooLong
+		return
+	}
+	ascii := toASCII(s)
+	copy(b, ascii)
+	if len(ascii) < len(b) {
+		b[len(ascii)] = 0
+	}
+}

+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (f *formatter) formatOctal(b []byte, x int64) {
+	s := strconv.FormatInt(x, 8)
+	// leading zeros, but leave room for a NUL.
+	for len(s)+1 < len(b) {
+		s = "0" + s
+	}
+	f.formatString(b, s)
+}

+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	var binBits = uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}

+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}

+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}

+var (
+	minTime = time.Unix(0, 0)
+	// There is room for 11 octal digits (33 bits) of mtime.
+	maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)

+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	return tw.writeHeader(hdr, true)
+}

+// writeHeader writes hdr and prepares to accept the file's contents.
+// It calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// This method is also called internally by writePAXHeader, with allowPax set
+// to false, in order to suppress recursively writing the pax header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(shanemhansen): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + var f formatter + var header []byte + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. + header = tw.hdrBuff[:] + if !allowPax { + header = tw.paxHdrBuff[:] + } + copy(header, zeroBlock) + s := slicer(header) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + + // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + pathHeaderBytes := s.next(fileNameSize) + + formatString(pathHeaderBytes, hdr.Name, paxPath) + + // Handle out of range ModTime carefully. + var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + f.formatOctal(s.next(8), hdr.Mode) // 100:108 + formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 + formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 + formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 + formatNumeric(s.next(12), modTime, paxNone) // 136:148 --- consider using pax for finer granularity + s.next(8) // chksum (148:156) + s.next(1)[0] = hdr.Typeflag // 156:157 + + formatString(s.next(100), hdr.Linkname, paxLinkpath) + + copy(s.next(8), []byte("ustar\x0000")) // 257:265 + formatString(s.next(32), hdr.Uname, paxUname) // 265:297 + formatString(s.next(32), hdr.Gname, paxGname) // 297:329 + formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 + formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 + + // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax + prefixHeaderBytes := s.next(155) + formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix + + // Use the GNU magic instead of POSIX magic if we used any GNU extensions. 
+ if tw.usedBinary { + copy(header[257:265], []byte("ustar \x00")) + } + + _, paxPathUsed := paxHeaders[paxPath] + // try to use a ustar header when only the name is too long + if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(pathHeaderBytes, suffix, paxNone) + formatString(prefixHeaderBytes, prefix, paxNone) + } + } + + // The chksum field is terminated by a NUL and a space. + // This is different from the other octal fields. + chksum, _ := checksum(header) + f.formatOctal(header[148:155], chksum) // Never fails + header[155] = ' ' + + // Check if there were any formatting errors. + if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = int64(hdr.Size) + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header) + return tw.err +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= fileNameSize || !isASCII(name) { + return "", "", false + } else if length > fileNamePrefixSize+1 { + length = fileNamePrefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > 100 { + ascii = ascii[:100] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. + var keys []string + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. 
+func formatPAXRecord(k, v string) string {
+	const padding = 3 // Extra padding for ' ', '=', and '\n'
+	size := len(k) + len(v) + padding
+	size += len(strconv.Itoa(size))
+	record := fmt.Sprintf("%d %s=%s\n", size, k, v)

+	// Final adjustment if adding size field increased the record size.
+	if len(record) != size {
+		size = len(record)
+		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+	}
+	return record
+}

+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+	if tw.closed {
+		err = ErrWriteAfterClose
+		return
+	}
+	overwrite := false
+	if int64(len(b)) > tw.nb {
+		b = b[0:tw.nb]
+		overwrite = true
+	}
+	n, err = tw.w.Write(b)
+	tw.nb -= int64(n)
+	if err == nil && overwrite {
+		err = ErrWriteTooLong
+		return
+	}
+	tw.err = err
+	return
+}

+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+	if tw.err != nil || tw.closed {
+		return tw.err
+	}
+	tw.Flush()
+	tw.closed = true
+	if tw.err != nil {
+		return tw.err
+	}

+	// trailer: two zero blocks
+	for i := 0; i < 2; i++ {
+		_, tw.err = tw.w.Write(zeroBlock)
+		if tw.err != nil {
+			break
+		}
+	}
+	return tw.err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/README.md b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
new file mode 100644
index 000000000000..2a3a5b56a624
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
@@ -0,0 +1,44 @@
+asm
+===

+This library is for the assembly and disassembly of tar archives, facilitated
+by `github.com/vbatts/tar-split/tar/storage`.


+Concerns
+--------

+For completely safe assembly/disassembly, there will need to be a Content
+Addressable Storage (CAS) directory that maps to a checksum in the
+`storage.Entry` of `storage.FileType`.

+This is due to the fact that tar archives _can_ allow multiple records for the
+same path, but the last one effectively wins, even if the prior records had a
+different payload.

+In this way, when assembling an archive from relative paths, if the archive has
+multiple entries for the same path, then all payloads read in from a relative
+path would be identical.


+Thoughts
+--------

+Have a look-aside directory or storage. This way, when a clobbering record is
+encountered in the tar stream, the payload of the prior/existing file is
+stored to the CAS. This way the clobbering record's file payload can be
+extracted, but we'll have preserved the payload needed to reassemble a precise
+tar archive.

+clobbered/path/to/file.[0-N]

+*alternatively*

+We could just _not_ support tar streams that have clobbering file paths.
+Appending records to the archive is not incredibly common, and doesn't happen
+by default for most implementations. Not supporting them wouldn't be a
+security concern either: if it did occur, we would reassemble an archive that
+doesn't validate its signature/checksum, so it shouldn't be trusted anyway.

+Otherwise, this will allow us to defer support for appended files as a FUTURE FEATURE.
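+
+As a minimal sketch of the checksum-only disassembly implied above (assuming
+the `NewJSONPacker` helper from `tar/storage`; `NewDiscardFilePutter` is the
+bit-bucket putter from `disassemble.go` below, and the file names here are
+purely illustrative):
+
+```go
+package main
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+func main() {
+	tarF, err := os.Open("layer.tar") // hypothetical input archive
+	if err != nil {
+		panic(err)
+	}
+	defer tarF.Close()
+
+	metaF, err := os.Create("tar-data.json") // where the metadata entries go
+	if err != nil {
+		panic(err)
+	}
+	defer metaF.Close()
+
+	// Discard the payloads but still record their CRC64 sums in the
+	// metadata; reassembly then only needs a FileGetter that can serve the
+	// payloads again (e.g. files extracted at relative paths, or a CAS).
+	rdr, err := asm.NewInputTarStream(tarF, storage.NewJSONPacker(metaF), storage.NewDiscardFilePutter())
+	if err != nil {
+		panic(err)
+	}
+	if _, err := io.Copy(ioutil.Discard, rdr); err != nil {
+		panic(err)
+	}
+}
+```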
+
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
new file mode 100644
index 000000000000..d624450ab7d5
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -0,0 +1,130 @@
+package asm
+
+import (
+	"bytes"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"sync"
+
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
+// stream.
+//
+// It takes a storage.FileGetter, for mapping the file payloads that are to be
+// read in, and a storage.Unpacker, which has access to the raw bytes and file
+// order metadata. With the combination of these two items, a precise assembled
+// tar archive is possible.
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
+	// Since these are interfaces, nil arguments are possible, so guard against
+	// a nil pointer.
+	if fg == nil || up == nil {
+		return nil
+	}
+	pr, pw := io.Pipe()
+	go func() {
+		err := WriteOutputTarStream(fg, up, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	return pr
+}
+
+// WriteOutputTarStream writes an assembled tar archive to a writer.
+func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
+	// Since these are interfaces, nil arguments are possible, so guard against
+	// a nil pointer.
+	if fg == nil || up == nil {
+		return nil
+	}
+	var copyBuffer []byte
+	var crcHash hash.Hash
+	var crcSum []byte
+	var multiWriter io.Writer
+	for {
+		entry, err := up.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			if _, err := w.Write(entry.Payload); err != nil {
+				return err
+			}
+		case storage.FileType:
+			if entry.Size == 0 {
+				continue
+			}
+			fh, err := fg.Get(entry.GetName())
+			if err != nil {
+				return err
+			}
+			if crcHash == nil {
+				crcHash = crc64.New(storage.CRCTable)
+				crcSum = make([]byte, 8)
+				multiWriter = io.MultiWriter(w, crcHash)
+				copyBuffer = byteBufferPool.Get().([]byte)
+				defer byteBufferPool.Put(copyBuffer)
+			} else {
+				crcHash.Reset()
+			}
+
+			if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
+				fh.Close()
+				return err
+			}
+
+			if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
+				// I would rather this be a comparable ErrInvalidChecksum or such,
+				// but since it's coming through the PipeReader, the context of
+				// _which_ file would be lost...
+				fh.Close()
+				return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
+			}
+			fh.Close()
+		}
+	}
+}
+
+var byteBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return make([]byte, 32*1024)
+	},
+}
+
+// copyWithBuffer is taken from the stdlib io.Copy implementation
+// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
+func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er == io.EOF {
+			break
+		}
+		if er != nil {
+			err = er
+			break
+		}
+	}
+	return written, err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
new file mode 100644
index 000000000000..54ef23aed366
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -0,0 +1,141 @@
+package asm
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewInputTarStream wraps the Reader stream of a tar archive and provides a
+// Reader stream of the same.
+//
+// In the middle it will pack the segments and file metadata to storage.Packer
+// `p`.
+//
+// The storage.FilePutter is where the payloads of files in the stream are
+// stashed. If this stashing is not needed, you can provide a nil
+// storage.FilePutter. Since the checksumming is still needed, a default
+// NewDiscardFilePutter will be used internally.
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
+	// What to do here... folks will want their own access to the Reader that is
+	// their tar archive stream, but we'll need that same stream to use our
+	// forked 'archive/tar'.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
+	// We'll need a storage.FilePutter too ...
+
+	// Another concern is whether to do any storage.FilePutter operations, such
+	// that we don't extract any amount of the archive. But then again, we're not
+	// making files/directories, hardlinks, etc. Just writing the io to the
+	// storage.FilePutter. Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+	// We'll return the pipe reader, since TeeReader does not buffer and will
+	// only read what the outputRdr reads. Since tar archives have padding at
+	// the end, we want to be the one reading the padding, even if the user's
+	// `archive/tar` doesn't care.
+	pR, pW := io.Pipe()
+	outputRdr := io.TeeReader(r, pW)
+
+	// we need a putter that will generate the crc64 sums of file payloads
+	if fp == nil {
+		fp = storage.NewDiscardFilePutter()
+	}
+
+	go func() {
+		tr := tar.NewReader(outputRdr)
+		tr.RawAccounting = true
+		for {
+			hdr, err := tr.Next()
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				// even when an EOF is reached, there are often 1024 null bytes at
+				// the end of an archive. Collect them too.
+				if b := tr.RawBytes(); len(b) > 0 {
+					_, err := p.AddEntry(storage.Entry{
+						Type:    storage.SegmentType,
+						Payload: b,
+					})
+					if err != nil {
+						pW.CloseWithError(err)
+						return
+					}
+				}
+				break // not return. We need the end of the reader.
+			}
+			if hdr == nil {
+				break // not return. We need the end of the reader.
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err := p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			var csum []byte
+			if hdr.Size > 0 {
+				var err error
+				_, csum, err = fp.Put(hdr.Name, tr)
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			entry := storage.Entry{
+				Type:    storage.FileType,
+				Size:    hdr.Size,
+				Payload: csum,
+			}
+			// For proper marshalling of non-utf8 characters
+			entry.SetName(hdr.Name)
+
+			// File entries are added, regardless of size
+			_, err = p.AddEntry(entry)
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err = p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+		}
+
+		// it is allowable, and not uncommon, that there is further padding at
+		// the end of an archive, beyond the expected 1024 null bytes.
+		remainder, err := ioutil.ReadAll(outputRdr)
+		if err != nil && err != io.EOF {
+			pW.CloseWithError(err)
+			return
+		}
+		_, err = p.AddEntry(storage.Entry{
+			Type:    storage.SegmentType,
+			Payload: remainder,
+		})
+		if err != nil {
+			pW.CloseWithError(err)
+			return
+		}
+		pW.Close()
+	}()
+
+	return pR, nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/doc.go b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
new file mode 100644
index 000000000000..4367b90220eb
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
@@ -0,0 +1,9 @@
+/*
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
+
+It uses `github.com/vbatts/tar-split/tar/storage` for packing/unpacking the
+metadata of a stream, as well as an implementation of getting/putting the file
+entries' payloads.
+*/
+package asm
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/doc.go b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
new file mode 100644
index 000000000000..83f7089ff156
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
@@ -0,0 +1,12 @@
+/*
+Package storage is for the metadata of a tar archive.
+
+It packs and unpacks the Entries of a stream. Entries are either segments of
+raw bytes (for the raw headers and various padding) or markers for a file
+payload.
+
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
+the file payload marker includes the name of the file, its size, and a crc64
+checksum (for basic file integrity).
+*/
+package storage
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
new file mode 100644
index 000000000000..c91e7ea1e823
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -0,0 +1,78 @@
+package storage
+
+import "unicode/utf8"
+
+// Entries is for sorting by Position
+type Entries []Entry
+
+func (e Entries) Len() int           { return len(e) }
+func (e Entries) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
+
+// Type of Entry
+type Type int
+
+const (
+	// FileType represents a file payload from the tar stream.
+	//
+	// This will be used to map to relative paths on disk. Only entries with
+	// Size > 0 will get read into a resulting output stream (hardlinks, for
+	// example, carry no payload).
+	FileType Type = 1 + iota
+	// SegmentType represents a raw bytes segment from the archive stream. These
+	// raw byte segments consist of the raw headers and various padding.
+	//
+	// Its payload is marshalled as base64-encoded bytes.
+	SegmentType
+)
+
+// Entry is the structure for packing and unpacking the information read from
+// the tar archive.
+//
+// The FileType Payload checksum uses `hash/crc64` for basic file integrity,
+// _not_ for cryptography.
+// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
+// collisions in a sample of 18.2 million, while CRC64 had none.
+type Entry struct {
+	Type     Type   `json:"type"`
+	Name     string `json:"name,omitempty"`
+	NameRaw  []byte `json:"name_raw,omitempty"`
+	Size     int64  `json:"size,omitempty"`
+	Payload  []byte `json:"payload"` // SegmentType stores raw bytes here; FileType stores the crc64 checksum here
+	Position int    `json:"position"`
+}
+
+// SetName will check whether name is a valid UTF-8 string, and set the
+// appropriate field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+	if utf8.ValidString(name) {
+		e.Name = name
+	} else {
+		e.NameRaw = []byte(name)
+	}
+}
+
+// SetNameBytes will check whether name is a valid UTF-8 string, and set the
+// appropriate field
+func (e *Entry) SetNameBytes(name []byte) {
+	if utf8.Valid(name) {
+		e.Name = string(name)
+	} else {
+		e.NameRaw = name
+	}
+}
+
+// GetName returns the string for the entry's name, regardless of which field
+// it is stored in
+func (e *Entry) GetName() string {
+	if len(e.NameRaw) > 0 {
+		return string(e.NameRaw)
+	}
+	return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of which
+// field it is stored in
+func (e *Entry) GetNameBytes() []byte {
+	if len(e.NameRaw) > 0 {
+		return e.NameRaw
+	}
+	return []byte(e.Name)
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
new file mode 100644
index 000000000000..ae11f8ffd4e8
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -0,0 +1,104 @@
+package storage
+
+import (
+	"bytes"
+	"errors"
+	"hash/crc64"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
+type FileGetter interface {
+	// Get returns a stream for the provided file path
+	Get(filename string) (output io.ReadCloser, err error)
+}
+
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filename.
+type FilePutter interface {
+	// Put returns the size of the stream received, and the crc64 checksum for
+	// the provided stream
+	Put(filename string, input io.Reader) (size int64, checksum []byte, err error)
+}
+
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
+type FileGetPutter interface {
+	FileGetter
+	FilePutter
+}
+
+// NewPathFileGetter returns a FileGetter that serves files relative to the
+// root path relpath.
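+//
+// For example (with illustrative paths): a getter created with
+// NewPathFileGetter("/tmp/unpacked") resolves Get("etc/hosts") to reading
+// "/tmp/unpacked/etc/hosts".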
+func NewPathFileGetter(relpath string) FileGetter {
+	return &pathFileGetter{root: relpath}
+}
+
+type pathFileGetter struct {
+	root string
+}
+
+func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	return os.Open(filepath.Join(pfg.root, filename))
+}
+
+type bufferFileGetPutter struct {
+	files map[string][]byte
+}
+
+func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
+	if _, ok := bfgp.files[name]; !ok {
+		return nil, errors.New("no such file")
+	}
+	b := bytes.NewBuffer(bfgp.files[name])
+	return &readCloserWrapper{b}, nil
+}
+
+func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	crc := crc64.New(CRCTable)
+	buf := bytes.NewBuffer(nil)
+	cw := io.MultiWriter(crc, buf)
+	i, err := io.Copy(cw, r)
+	if err != nil {
+		return 0, nil, err
+	}
+	bfgp.files[name] = buf.Bytes()
+	return i, crc.Sum(nil), nil
+}
+
+type readCloserWrapper struct {
+	io.Reader
+}
+
+func (w *readCloserWrapper) Close() error { return nil }
+
+// NewBufferFileGetPutter is a simple in-memory FileGetPutter.
+//
+// The implication is that this is memory intensive; it is probably best suited
+// for testing or lightweight cases.
+func NewBufferFileGetPutter() FileGetPutter {
+	return &bufferFileGetPutter{
+		files: map[string][]byte{},
+	}
+}
+
+// NewDiscardFilePutter is a bit bucket FilePutter
+func NewDiscardFilePutter() FilePutter {
+	return &bitBucketFilePutter{}
+}
+
+type bitBucketFilePutter struct {
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	c := crc64.New(CRCTable)
+	i, err := io.Copy(c, r)
+	return i, c.Sum(nil), err
+}
+
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
new file mode 100644
index 000000000000..aba694818549
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -0,0 +1,127 @@
+package storage
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"path/filepath"
+	"unicode/utf8"
+)
+
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+
+// Packer describes the methods to pack Entries to a storage destination
+type Packer interface {
+	// AddEntry packs the Entry and returns its position
+	AddEntry(e Entry) (int, error)
+}
+
+// Unpacker describes the methods to read Entries from a source
+type Unpacker interface {
+	// Next returns the next Entry being unpacked, or error, until io.EOF
+	Next() (*Entry, error)
+}
+
+/* TODO(vbatts) figure out a good model for this
+type PackUnpacker interface {
+	Packer
+	Unpacker
+}
+*/
+
+type jsonUnpacker struct {
+	seen seenNames
+	dec  *json.Decoder
+}
+
+func (jup *jsonUnpacker) Next() (*Entry, error) {
+	var e Entry
+	err := jup.dec.Decode(&e)
+	if err != nil {
+		return nil, err
+	}
+
+	// check for duplicate name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jup.seen[cName]; ok {
+			return nil, ErrDuplicatePath
+		}
+		jup.seen[cName] = struct{}{}
+	}
+
+	return &e, err
+}
+
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
+// FileType) as a json document.
+//
+// Each Entry read is expected to be delimited by a newline.
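+//
+// A FileType entry, for example, might look like the following line (the
+// payload field is the base64 of the crc64 checksum; all values here are
+// illustrative):
+//
+//	{"type":1,"name":"./hello.txt","size":5,"payload":"QmXiTHlKdXc=","position":2}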
+func NewJSONUnpacker(r io.Reader) Unpacker {
+	return &jsonUnpacker{
+		dec:  json.NewDecoder(r),
+		seen: seenNames{},
+	}
+}
+
+type jsonPacker struct {
+	w    io.Writer
+	e    *json.Encoder
+	pos  int
+	seen seenNames
+}
+
+type seenNames map[string]struct{}
+
+func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+	// if Name is not valid utf8, switch it to raw first.
+	if e.Name != "" {
+		if !utf8.ValidString(e.Name) {
+			e.NameRaw = []byte(e.Name)
+			e.Name = ""
+		}
+	}
+
+	// check early for duplicate name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jp.seen[cName]; ok {
+			return -1, ErrDuplicatePath
+		}
+		jp.seen[cName] = struct{}{}
+	}
+
+	e.Position = jp.pos
+	err := jp.e.Encode(e)
+	if err != nil {
+		return -1, err
+	}
+
+	// made it this far, increment now
+	jp.pos++
+	return e.Position, nil
+}
+
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
+// FileType) as a json document.
+//
+// The Entries are delimited by newlines.
+func NewJSONPacker(w io.Writer) Packer {
+	return &jsonPacker{
+		w:    w,
+		e:    json.NewEncoder(w),
+		seen: seenNames{},
+	}
+}
+
+/*
+TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgpack
+(https://github.com/ugorji/go)
+
+Even so, since our jsonUnpacker and jsonPacker just take an
+io.Reader/io.Writer, we can get away with passing them a
+gzip.Reader/gzip.Writer.
+*/
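That gzip point can be made concrete. The following is a minimal sketch, not part of the change itself; the metadata filename `tar-data.json.gz` is hypothetical:

```go
package main

import (
	"compress/gzip"
	"io"
	"os"

	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	// Pack entries through a gzip.Writer; the jsonPacker only sees an io.Writer.
	f, err := os.Create("tar-data.json.gz") // hypothetical metadata file
	if err != nil {
		panic(err)
	}
	gzW := gzip.NewWriter(f)
	p := storage.NewJSONPacker(gzW)
	if _, err := p.AddEntry(storage.Entry{
		Type:    storage.SegmentType,
		Payload: []byte("raw header bytes would go here"),
	}); err != nil {
		panic(err)
	}
	gzW.Close()
	f.Close()

	// Unpack through a gzip.Reader; the jsonUnpacker only sees an io.Reader.
	r, err := os.Open("tar-data.json.gz")
	if err != nil {
		panic(err)
	}
	defer r.Close()
	gzR, err := gzip.NewReader(r)
	if err != nil {
		panic(err)
	}
	up := storage.NewJSONUnpacker(gzR)
	for {
		entry, err := up.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		_ = entry // type, name, position, and payload are now available
	}
}
```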