diff --git a/vendor.conf b/vendor.conf index 3cd9edb16a..6c260a16ff 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,4 +1,6 @@ github.com/urfave/cli v1.17.0 +github.com/kr/pretty v0.1.0 +github.com/kr/text v0.1.0 github.com/containers/image master github.com/opencontainers/go-digest master gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb @@ -10,6 +12,7 @@ github.com/davecgh/go-spew master github.com/pmezard/go-difflib master github.com/pkg/errors master golang.org/x/crypto master +github.com/ulikunitz/xz v0.5.4 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 diff --git a/vendor/github.com/containers/image/README.md b/vendor/github.com/containers/image/README.md index 237e9e38e2..017211b2c8 100644 --- a/vendor/github.com/containers/image/README.md +++ b/vendor/github.com/containers/image/README.md @@ -65,7 +65,9 @@ the primary downside is that creating new signatures with the Golang-only implem - `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled and impossible to import when this build tag is in use. -## Contributing +## [Contributing](CONTRIBUTING.md) + +Information about contributing to this project. When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation. diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 65c3853be5..6b1df75a98 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -347,6 +347,9 @@ func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.Sy // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. func (ic *imageCopier) updateEmbeddedDockerReference() error { + if ic.c.dest.IgnoresEmbeddedDockerReference() { + return nil // Destination would prefer us not to update the embedded reference. 
+ } destRef := ic.c.dest.Reference().DockerReference() if destRef == nil { return nil // Destination does not care about Docker references @@ -601,6 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) if err != nil { return "", err } + defer s.Close() stream = s } @@ -670,10 +674,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr inputInfo.Size = -1 } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") - destStream, err = decompressor(destStream) + s, err := decompressor(destStream) if err != nil { return types.BlobInfo{}, err } + defer s.Close() + destStream = s inputInfo.Digest = "" inputInfo.Size = -1 } else { diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index a38d7d5cfb..d888931fe6 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -117,6 +117,13 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool { return false } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. 
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 4a1d421496..9825b40cff 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -280,13 +280,19 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima v2Res := &V2Results{} v1Res := &V1Results{} + // Get credentials from authfile for the underlying hostname + username, password, err := config.GetAuthentication(sys, registry) + if err != nil { + return nil, errors.Wrapf(err, "error getting username and password") + } + // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results if registry == dockerHostname { registry = dockerV1Hostname } - client, err := newDockerClientWithDetails(sys, registry, "", "", "", nil, "") + client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "") if err != nil { return nil, errors.Wrapf(err, "error creating new docker client") } @@ -443,6 +449,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope } authReq = authReq.WithContext(ctx) getParams := authReq.URL.Query() + if c.username != "" { + getParams.Add("account", c.username) + } if service != "" { getParams.Add("service", service) } diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 782a048cec..85d5689952 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -95,6 +95,13 @@ func (d *dockerImageDestination) MustMatchRuntimeOS() bool { return false } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. +} + // sizeCounter is an io.Writer which only counts the total size of its input. type sizeCounter struct{ size int64 } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index b6791af95b..d6510ccf15 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -75,6 +75,13 @@ func (d *Destination) MustMatchRuntimeOS() bool { return false } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *Destination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). 
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index 2a0d9966e6..942893a812 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -3,7 +3,6 @@ package tarfile import ( "archive/tar" "bytes" - "compress/gzip" "context" "encoding/json" "io" @@ -43,8 +42,7 @@ type layerInfo struct { // the source of an image. // To do for both the NewSourceFromFile and NewSourceFromStream functions -// NewSourceFromFile returns a tarfile.Source for the specified path -// NewSourceFromFile supports both conpressed and uncompressed input +// NewSourceFromFile returns a tarfile.Source for the specified path. func NewSourceFromFile(path string) (*Source, error) { file, err := os.Open(path) if err != nil { @@ -52,19 +50,24 @@ func NewSourceFromFile(path string) (*Source, error) { } defer file.Close() - reader, err := gzip.NewReader(file) + // If the file is already not compressed we can just return the file itself + // as a source. Otherwise we pass the stream to NewSourceFromStream. + stream, isCompressed, err := compression.AutoDecompress(file) if err != nil { + return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) + } + defer stream.Close() + if !isCompressed { return &Source{ tarPath: path, }, nil } - defer reader.Close() - - return NewSourceFromStream(reader) + return NewSourceFromStream(stream) } -// NewSourceFromStream returns a tarfile.Source for the specified inputStream, which must be uncompressed. -// The caller can close the inputStream immediately after NewSourceFromFile returns. +// NewSourceFromStream returns a tarfile.Source for the specified inputStream, +// which can be either compressed or uncompressed. The caller can close the +// inputStream immediately after NewSourceFromFile returns. func NewSourceFromStream(inputStream io.Reader) (*Source, error) { // FIXME: use SystemContext here. // Save inputStream to a temporary file @@ -81,8 +84,20 @@ func NewSourceFromStream(inputStream io.Reader) (*Source, error) { } }() - // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - if _, err := io.Copy(tarCopyFile, inputStream); err != nil { + // In order to be compatible with docker-load, we need to support + // auto-decompression (it's also a nice quality-of-life thing to avoid + // giving users really confusing "invalid tar header" errors). + uncompressedStream, _, err := compression.AutoDecompress(inputStream) + if err != nil { + return nil, errors.Wrap(err, "Error auto-decompressing input") + } + defer uncompressedStream.Close() + + // Copy the plain archive to the temporary file. + // + // TODO: This can take quite some time, and should ideally be cancellable + // using a context.Context. 
+ if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name()) } succeeded = true @@ -292,7 +307,25 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif return nil, err } if li, ok := unknownLayerSizes[h.Name]; ok { - li.size = h.Size + // Since GetBlob will decompress layers that are compressed we need + // to do the decompression here as well, otherwise we will + // incorrectly report the size. Pretty critical, since tools like + // umoci always compress layer blobs. Obviously we only bother with + // the slower method of checking if it's compressed. + uncompressedStream, isCompressed, err := compression.AutoDecompress(t) + if err != nil { + return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) + } + defer uncompressedStream.Close() + + uncompressedSize := h.Size + if isCompressed { + uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) + if err != nil { + return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) + } + } + li.size = uncompressedSize delete(unknownLayerSizes, h.Name) } } @@ -346,16 +379,22 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil } -type readCloseWrapper struct { +// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. +type uncompressedReadCloser struct { io.Reader - closeFunc func() error + underlyingCloser func() error + uncompressedCloser func() error } -func (r readCloseWrapper) Close() error { - if r.closeFunc != nil { - return r.closeFunc() +func (r uncompressedReadCloser) Close() error { + var res error + if err := r.uncompressedCloser(); err != nil { + res = err } - return nil + if err := r.underlyingCloser(); err != nil && res == nil { + res = err + } + return res } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). @@ -369,10 +408,16 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, - stream, err := s.openTarComponent(li.path) + underlyingStream, err := s.openTarComponent(li.path) if err != nil { return nil, 0, err } + closeUnderlyingStream := true + defer func() { + if closeUnderlyingStream { + underlyingStream.Close() + } + }() // In order to handle the fact that digests != diffIDs (and thus that a // caller which is trying to verify the blob will run into problems), @@ -386,22 +431,17 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose // be verifing a "digest" which is not the actual layer's digest (but // is instead the DiffID). 
- decompressFunc, reader, err := compression.DetectCompression(stream) + uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) if err != nil { - return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest) - } - - if decompressFunc != nil { - reader, err = decompressFunc(reader) - if err != nil { - return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest) - } + return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest) } - newStream := readCloseWrapper{ - Reader: reader, - closeFunc: stream.Close, + newStream := uncompressedReadCloser{ + Reader: uncompressedStream, + underlyingCloser: underlyingStream.Close, + uncompressedCloser: uncompressedStream.Close, } + closeUnderlyingStream = false return newStream, li.size, nil } diff --git a/vendor/github.com/containers/image/docs/registries.conf.5.md b/vendor/github.com/containers/image/docs/registries.conf.5.md index 3aa4ad5a55..45fe632435 100644 --- a/vendor/github.com/containers/image/docs/registries.conf.5.md +++ b/vendor/github.com/containers/image/docs/registries.conf.5.md @@ -7,35 +7,44 @@ registries.conf - Syntax of System Registry Configuration File # DESCRIPTION The REGISTRIES configuration file is a system-wide configuration file for container image -registries. The file format is TOML. +registries. The file format is TOML. The valid categories are: 'registries.search', +'registries.insecure', and 'registries.block'. # FORMAT -The TOML_format is used to build simple list format for registries under two -categories: `search` and `insecure`. You can list multiple registries using -as a comma separated list. +The TOML_format is used to build a simple list format for registries under three +categories: `registries.search`, `registries.insecure`, and `registries.block`. +You can list multiple registries using a comma separated list. Search registries are used when the caller of a container runtime does not fully specify the container image that they want to execute. These registries are prepended onto the front - of the specified container image until the named image is found at a registry. +of the specified container image until the named image is found at a registry. Insecure Registries. By default container runtimes use TLS when retrieving images from a registry. If the registry is not setup with TLS, then the container runtime will fail to pull images from the registry. If you add the registry to the list of insecure registries then the container runtime will attempt use standard web protocols to pull the image. It also allows you to pull from a registry with self-signed certificates. -Note insecure registries can be used for any registry, not just the -registries listed under search. +Note insecure registries can be used for any registry, not just the registries listed +under search. -The following example configuration defines two searchable registries and one -insecure registry. +Block Registries. The registries in this category are not pulled from when +retrieving images. + +# EXAMPLE +The following example configuration defines two searchable registries, one +insecure registry, and two blocked registries. 
``` [registries.search] -registries = ["registry1.com", "registry2.com"] +registries = ['registry1.com', 'registry2.com'] [registries.insecure] -registries = ["registry3.com"] +registries = ['registry3.com'] + +[registries.block] +registries = ['registry.untrusted.com', 'registry.unsafe.com'] ``` # HISTORY Aug 2017, Originally compiled by Brent Baude +Jun 2018, Updated by Tom Sweeney diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go index 929381813a..28cec7ddb1 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -2,7 +2,6 @@ package image import ( "context" - "encoding/json" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" @@ -25,8 +24,12 @@ func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { } // manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { - return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { + m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) + if err != nil { + return nil, err + } + return &manifestSchema1{m: m}, nil } func (m *manifestSchema1) serialize() ([]byte, error) { @@ -64,7 +67,7 @@ func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, erro // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -148,12 +151,12 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife // Based on github.com/docker/docker/distribution/pull_v2.go func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { - if len(m.m.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. + if len(m.m.ExtractedV1Compatibility) == 0 { + // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. 
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } - if len(m.m.History) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) + if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) } if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) @@ -165,14 +168,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl // Build a list of the diffIDs for the non-empty layers. diffIDs := []digest.Digest{} var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.History) - 1) - v1Index + for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - var v1compat manifest.Schema1V1Compatibility - if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { - return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) - } - if !v1compat.ThrowAway { + if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { var size int64 if uploadedLayerInfos != nil { size = uploadedLayerInfos[v2Index].Size diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 95542e4a10..b639ab714f 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -18,17 +18,17 @@ import ( "github.com/sirupsen/logrus" ) -// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) +// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is // a non-zero embedded timestamp; we could zero that, but that would just waste storage space // in registries, so let’s use the same values. 
-var gzippedEmptyLayer = []byte{ +var GzippedEmptyLayer = []byte{ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, } -// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer -const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") +// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer +const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") type manifestSchema2 struct { src types.ImageSource // May be nil if configBlob is not nil @@ -95,11 +95,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } - stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{ - Digest: m.m.ConfigDescriptor.Digest, - Size: m.m.ConfigDescriptor.Size, - URLs: m.m.ConfigDescriptor.URLs, - }) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor)) if err != nil { return nil, err } @@ -121,7 +117,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -253,16 +249,16 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ if historyEntry.EmptyLayer { if !haveGzippedEmptyLayer { logrus.Debugf("Uploading empty layer during conversion to schema 1") - info, err := dest.PutBlob(ctx, bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}, false) + info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } - if info.Digest != gzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest) + if info.Digest != GzippedEmptyLayerDigest { + return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) } haveGzippedEmptyLayer = true } - blobDigest = gzippedEmptyLayerDigest + blobDigest = GzippedEmptyLayerDigest } else { if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) @@ -308,7 +304,10 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ } history[0].V1Compatibility = string(v1Config) - m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) + if err != nil { + return nil, err // This should never happen, we should have created all the components correctly. 
+ } return memoryImageFromManifest(m1), nil } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go index 09d30ff386..c5ca5b12e2 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/image/manifest.go @@ -62,3 +62,12 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } + +// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. +func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { + blobs := make([]types.BlobInfo, len(layers)) + for i, layer := range layers { + blobs[i] = layer.BlobInfo + } + return blobs +} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 417738753d..298db360dc 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -60,11 +60,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } - stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{ - Digest: m.m.Config.Digest, - Size: m.m.Config.Size, - URLs: m.m.Config.URLs, - }) + stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config)) if err != nil { return nil, err } @@ -101,7 +97,7 @@ func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() + return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go index 42751cec1c..234c8e63f1 100644 --- a/vendor/github.com/containers/image/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -25,25 +25,28 @@ type Schema1History struct { // Schema1 is a manifest in docker/distribution schema 1. type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` - SchemaVersion int `json:"schemaVersion"` + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! + ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) + SchemaVersion int `json:"schemaVersion"` +} + +type schema1V1CompatibilityContainerConfig struct { + Cmd []string } // Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. 
type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` } // Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. @@ -57,11 +60,8 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) { if s1.SchemaVersion != 1 { return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) } - if len(s1.FSLayers) != len(s1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(s1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") + if err := s1.initialize(); err != nil { + return nil, err } if err := s1.fixManifestLayers(); err != nil { return nil, err @@ -70,7 +70,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) { } // Schema1FromComponents creates an Schema1 manifest instance from the supplied data. -func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { +func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) { var name, tag string if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. name = reference.Path(ref) @@ -78,7 +78,7 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist tag = tagged.Tag() } } - return &Schema1{ + s1 := Schema1{ Name: name, Tag: tag, Architecture: architecture, @@ -86,6 +86,10 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist History: history, SchemaVersion: 1, } + if err := s1.initialize(); err != nil { + return nil, err + } + return &s1, nil } // Schema1Clone creates a copy of the supplied Schema1 manifest. @@ -94,31 +98,51 @@ func Schema1Clone(src *Schema1) *Schema1 { return © } +// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest. +func (m *Schema1) initialize() error { + if len(m.FSLayers) != len(m.History) { + return errors.New("length of history not equal to number of layers") + } + if len(m.FSLayers) == 0 { + return errors.New("no FSLayers in manifest") + } + m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History)) + for i, h := range m.History { + if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil { + return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i) + } + } + return nil +} + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. func (m *Schema1) ConfigInfo() types.BlobInfo { return types.BlobInfo{} } -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). 
+// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) +func (m *Schema1) LayerInfos() []LayerInfo { + layers := make([]LayerInfo, len(m.FSLayers)) for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + layers[(len(m.FSLayers)-1)-i] = LayerInfo{ + BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, + EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, + } } return layers } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. if len(m.FSLayers) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest @@ -144,31 +168,19 @@ func (m *Schema1) Serialize() ([]byte, error) { // Note that even after this succeeds, m.FSLayers may contain duplicate entries // (for Dockerfile operations which change the configuration but not the filesystem). 
func (m *Schema1) fixManifestLayers() error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(m.FSLayers) == len(m.History) - imgs := make([]*imageV1, len(m.FSLayers)) - for i := range m.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { + // m.initialize() has verified that len(m.FSLayers) == len(m.History) + for _, compat := range m.ExtractedV1Compatibility { + if err := validateV1ID(compat.ID); err != nil { return err } } - if imgs[len(imgs)-1].Parent != "" { + if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { return errors.New("Invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string - for _, img := range imgs { + for _, img := range m.ExtractedV1Compatibility { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) @@ -177,12 +189,13 @@ func (m *Schema1) fixManifestLayers() error { idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { + if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) + } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } } return nil @@ -209,7 +222,7 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI DockerVersion: s1.DockerVersion, Architecture: s1.Architecture, Os: s1.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(m.LayerInfos()), } if s1.Config != nil { i.Labels = s1.Config.Labels @@ -240,11 +253,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { } // Build the history. 
convertedHistory := []Schema2History{} - for _, h := range m.History { - compat := Schema1V1Compatibility{} - if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil { - return nil, errors.Wrapf(err, "error decoding history information") - } + for _, compat := range m.ExtractedV1Compatibility { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go index ef176271dc..a7f9451bd9 100644 --- a/vendor/github.com/containers/image/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -18,6 +18,16 @@ type Schema2Descriptor struct { URLs []string `json:"urls,omitempty"` } +// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. +func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + MediaType: desc.MediaType, + } +} + // Schema2 is a manifest in docker/distribution schema 2. type Schema2 struct { SchemaVersion int `json:"schemaVersion"` @@ -171,19 +181,18 @@ func Schema2Clone(src *Schema2) *Schema2 { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. func (m *Schema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} + return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor) } -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} +func (m *Schema2) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromSchema2Descriptor(layer), + EmptyLayer: false, }) } return blobs @@ -227,7 +236,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t DockerVersion: s2.DockerVersion, Architecture: s2.Architecture, Os: s2.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(m.LayerInfos()), } if s2.Config != nil { i.Labels = s2.Config.Labels diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index 2bc801d815..ae1921b6cc 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -50,10 +50,10 @@ var DefaultRequestedManifestMIMETypes = []string{ type Manifest interface { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. ConfigInfo() types.BlobInfo - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). 
+ // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo + LayerInfos() []LayerInfo // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) UpdateLayerInfos(layerInfos []types.BlobInfo) error @@ -73,6 +73,12 @@ type Manifest interface { Serialize() ([]byte, error) } +// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. +type LayerInfo struct { + types.BlobInfo + EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. +} + // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. // FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, // but we may not have such metadata available (e.g. when the manifest is a local file). @@ -227,9 +233,9 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) { } } -// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() // method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. -func LayerInfosToStrings(infos []types.BlobInfo) []string { +func layerInfosToStrings(infos []LayerInfo) []string { layers := make([]string, len(infos)) for i, info := range infos { layers[i] = info.Digest.String() diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go index 8cdc3e5b93..91705045bb 100644 --- a/vendor/github.com/containers/image/manifest/oci.go +++ b/vendor/github.com/containers/image/manifest/oci.go @@ -10,6 +10,17 @@ import ( "github.com/pkg/errors" ) +// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. +func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { + return types.BlobInfo{ + Digest: desc.Digest, + Size: desc.Size, + URLs: desc.URLs, + Annotations: desc.Annotations, + MediaType: desc.MediaType, + } +} + // OCI1 is a manifest.Manifest implementation for OCI images. // The underlying data from imgspecv1.Manifest is also available. type OCI1 struct { @@ -45,16 +56,19 @@ func OCI1Clone(src *OCI1) *OCI1 { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. func (m *OCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations} + return BlobInfoFromOCI1Descriptor(m.Config) } -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. 
-func (m *OCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} +func (m *OCI1) LayerInfos() []LayerInfo { + blobs := []LayerInfo{} for _, layer := range m.Layers { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) + blobs = append(blobs, LayerInfo{ + BlobInfo: BlobInfoFromOCI1Descriptor(layer), + EmptyLayer: false, + }) } return blobs } @@ -101,7 +115,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type Labels: v1.Config.Labels, Architecture: v1.Architecture, Os: v1.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), + Layers: layerInfosToStrings(m.LayerInfos()), } return i, nil } diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go index 432b310b42..93f3f80f96 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ b/vendor/github.com/containers/image/oci/archive/oci_dest.go @@ -70,6 +70,13 @@ func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { return d.unpackedDest.MustMatchRuntimeOS() } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.unpackedDest.IgnoresEmbeddedDockerReference() +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index a1e27b5d52..3516327508 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -18,9 +18,10 @@ import ( ) type ociImageDestination struct { - ref ociReference - index imgspecv1.Index - sharedBlobDir string + ref ociReference + index imgspecv1.Index + sharedBlobDir string + acceptUncompressedLayers bool } // newImageDestination returns an ImageDestination for writing to an existing directory. @@ -43,6 +44,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag d := &ociImageDestination{ref: ref, index: *index} if sys != nil { d.sharedBlobDir = sys.OCISharedBlobDirPath + d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers } if err := ensureDirectoryExists(d.ref.dir); err != nil { @@ -81,6 +83,9 @@ func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { } func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { + if d.acceptUncompressedLayers { + return types.PreserveOriginal + } return types.Compress } @@ -95,6 +100,13 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool { return false } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. 
+func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go index 857c808183..4e5cecff2e 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go @@ -23,8 +23,14 @@ func init() { transports.Register(Transport) } -// Transport is an ImageTransport for OCI directories. -var Transport = ociTransport{} +var ( + // Transport is an ImageTransport for OCI directories. + Transport = ociTransport{} + + // ErrMoreThanOneImage is an error returned when the manifest includes + // more than one image and the user should choose which one to use. + ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") +) type ociTransport struct{} @@ -184,7 +190,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { d = &index.Manifests[0] } else { // ask user to choose image when more than one image in the oci directory - return imgspecv1.Descriptor{}, errors.Wrapf(err, "more than one image in oci, choose an image") + return imgspecv1.Descriptor{}, ErrMoreThanOneImage } } else { // if image specified, look through all manifests for a match diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 3504d8ce14..2cadb0ce28 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -369,6 +369,13 @@ func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { return false } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { + return d.docker.IgnoresEmbeddedDockerReference() +} + // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index 464377613e..0ae30f1f4d 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -125,6 +125,13 @@ func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { return true } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. 
+func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { + return false // N/A, DockerReference() returns nil. +} + func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) { tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") if err != nil { diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go index c19d962ee5..a39523e4f4 100644 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ b/vendor/github.com/containers/image/pkg/compression/compression.go @@ -5,28 +5,34 @@ import ( "compress/bzip2" "compress/gzip" "io" + "io/ioutil" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "github.com/ulikunitz/xz" ) // DecompressorFunc returns the decompressed stream, given a compressed stream. -type DecompressorFunc func(io.Reader) (io.Reader, error) +// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). +type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. -func GzipDecompressor(r io.Reader) (io.Reader, error) { +func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) } // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.Reader, error) { - return bzip2.NewReader(r), nil +func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { + return ioutil.NopCloser(bzip2.NewReader(r)), nil } // XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.Reader, error) { - return nil, errors.New("Decompressing xz streams is not supported") +func XzDecompressor(r io.Reader) (io.ReadCloser, error) { + r, err := xz.NewReader(r) + if err != nil { + return nil, err + } + return ioutil.NopCloser(r), nil } // compressionAlgos is an internal implementation detail of DetectCompression @@ -65,3 +71,24 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil } + +// AutoDecompress takes a stream and returns an uncompressed version of the +// same stream. +// The caller must call Close() on the returned stream (even if the input does not need, +// or does not even support, closing!). 
+func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { + decompressor, stream, err := DetectCompression(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "Error detecting compression") + } + var res io.ReadCloser + if decompressor != nil { + res, err = decompressor(stream) + if err != nil { + return nil, false, errors.Wrapf(err, "Error initializing decompression") + } + } else { + res = ioutil.NopCloser(stream) + } + return res, decompressor != nil, nil +} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go index 724aef4e6f..58e8d50221 100644 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/pkg/docker/config/config.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" "github.com/containers/image/types" @@ -27,16 +26,12 @@ type dockerConfigFile struct { CredHelpers map[string]string `json:"credHelpers,omitempty"` } -const ( - defaultPath = "/run" - authCfg = "containers" - authCfgFileName = "auth.json" - dockerCfg = ".docker" - dockerCfgFileName = "config.json" - dockerLegacyCfg = ".dockercfg" -) - var ( + defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") + xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") + dockerHomePath = filepath.FromSlash(".docker/config.json") + dockerLegacyHomePath = ".dockercfg" + // ErrNotLoggedIn is returned for users not logged into a registry // that they are trying to logout of ErrNotLoggedIn = errors.New("not logged in") @@ -64,7 +59,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil } - dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg) + dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath) var paths []string pathToAuth, err := getPathToAuth(sys) if err == nil { @@ -75,7 +70,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin // Logging the error as a warning instead and moving on to pulling the image logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) } - paths = append(paths, filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath) + paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath) for _, path := range paths { legacyFormat := path == dockerLegacyPath @@ -135,15 +130,15 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { // getPath gets the path of the auth.json file // The path can be overriden by the user if the overwrite-path flag is set -// If the flag is not set and XDG_RUNTIME_DIR is ser, the auth.json file is saved in XDG_RUNTIME_DIR/containers -// Otherwise, the auth.json file is stored in /run/user/UID/containers +// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers +// Otherwise, the auth.json file is stored in /run/containers/UID func getPathToAuth(sys *types.SystemContext) (string, error) { if sys != nil { if sys.AuthFilePath != "" { return sys.AuthFilePath, nil } if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName), nil + return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil } } @@ -155,14 +150,12 
@@ func getPathToAuth(sys *types.SystemContext) (string, error) { if os.IsNotExist(err) { // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, - // so return an error referring to $XDG_RUNTIME_DIR instead of …/authCfgFileName inside. + // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) - } // else ignore err and let the caller fail accessing …/authCfgFileName. - runtimeDir = filepath.Join(runtimeDir, authCfg) - } else { - runtimeDir = filepath.Join(defaultPath, authCfg, strconv.Itoa(os.Getuid())) + } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. + return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil } - return filepath.Join(runtimeDir, authCfgFileName), nil + return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil } // readJSONFile unmarshals the authentications stored in the auth.json file and returns it diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index b43caf1a05..6ae525df41 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -50,8 +50,7 @@ type storageImageSource struct { } type storageImageDestination struct { - imageRef storageReference // The reference we'll use to name the image - publicRef storageReference // The reference we return when asked about the name we'll give to the image + imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary @@ -102,6 +101,9 @@ func (s storageImageSource) Close() error { // GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + if info.Digest == image.GzippedEmptyLayerDigest { + return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil + } rc, n, _, err = s.getBlobAndLayerID(info) return rc, n, err } @@ -175,11 +177,15 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of // the image, after they've been decompressed. 
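For orientation: the config.go rewrite above resolves the credentials file from $XDG_RUNTIME_DIR when it is set and otherwise falls back to a per-UID path under /run/containers. Below is a rough standalone sketch of that lookup order, standard library only; the real helper also honors the SystemContext overrides.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// authPath mimics the lookup order described above: prefer
// $XDG_RUNTIME_DIR/containers/auth.json, otherwise /run/containers/<uid>/auth.json.
func authPath() string {
	if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
		return filepath.Join(runtimeDir, "containers", "auth.json")
	}
	return fmt.Sprintf(filepath.FromSlash("/run/containers/%d/auth.json"), os.Getuid())
}

func main() {
	fmt.Println(authPath())
}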
func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - updatedBlobInfos := []types.BlobInfo{} - _, manifestType, err := s.GetManifest(ctx, nil) + manifestBlob, manifestType, err := s.GetManifest(ctx, nil) if err != nil { return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) } + man, err := manifest.FromBlob(manifestBlob, manifestType) + if err != nil { + return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) + } + uncompressedLayerType := "" switch manifestType { case imgspecv1.MediaTypeImageManifest: @@ -188,6 +194,8 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo // This is actually a compressed type, but there's no uncompressed type defined uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType } + + physicalBlobInfos := []types.BlobInfo{} layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) @@ -205,10 +213,43 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo Size: layer.UncompressedSize, MediaType: uncompressedLayerType, } - updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) layerID = layer.Parent } - return updatedBlobInfos, nil + + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + if err != nil { + return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) + } + return res, nil +} + +// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, +// but using layer data which we can actually produce — physicalInfos for non-empty layers, +// and image.GzippedEmptyLayer for empty ones. +// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) +func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) { + nextPhysical := 0 + res := make([]types.BlobInfo, len(manifestInfos)) + for i, mi := range manifestInfos { + if mi.EmptyLayer { + res[i] = types.BlobInfo{ + Digest: image.GzippedEmptyLayerDigest, + Size: int64(len(image.GzippedEmptyLayer)), + MediaType: mi.MediaType, + } + } else { + if nextPhysical >= len(physicalInfos) { + return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) + } + res[i] = physicalInfos[nextPhysical] + nextPhysical++ + } + } + if nextPhysical != len(physicalInfos) { + return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos)) + } + return res, nil } // GetSignatures() parses the image's signatures blob into a slice of byte slices. @@ -243,15 +284,8 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e if err != nil { return nil, errors.Wrapf(err, "error creating a temporary directory") } - // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite - // schema1 image manifests to remove embedded references, since that changes the manifest's - // digest, and that makes the image unusable if we subsequently try to access it using a - // reference that mentions the no-longer-correct digest. 
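For orientation: buildLayerInfosForCopy above walks the manifest's layer list and substitutes a well-known gzipped empty blob for throwaway layers while consuming the physical layers in order. Below is a self-contained sketch of that interleaving logic with local stand-in types, not the vendored ones.

package main

import "fmt"

type manifestLayer struct {
	Digest string
	Empty  bool
}

type blobInfo struct {
	Digest string
	Size   int64
}

// emptyBlob stands in for image.GzippedEmptyLayer in the real code.
var emptyBlob = blobInfo{Digest: "sha256:<gzipped-empty-layer>", Size: 32}

// interleave pairs manifest entries with physical blobs, substituting the
// empty blob for layers the manifest marks as throwaway.
func interleave(manifest []manifestLayer, physical []blobInfo) ([]blobInfo, error) {
	next := 0
	out := make([]blobInfo, len(manifest))
	for i, m := range manifest {
		if m.Empty {
			out[i] = emptyBlob
			continue
		}
		if next >= len(physical) {
			return nil, fmt.Errorf("expected more than %d physical layers", len(physical))
		}
		out[i] = physical[next]
		next++
	}
	if next != len(physical) {
		return nil, fmt.Errorf("used only %d out of %d physical layers", next, len(physical))
	}
	return out, nil
}

func main() {
	manifest := []manifestLayer{{Digest: "a"}, {Digest: "b", Empty: true}, {Digest: "c"}}
	physical := []blobInfo{{Digest: "A", Size: 1}, {Digest: "C", Size: 3}}
	res, err := interleave(manifest, physical)
	fmt.Println(res, err)
}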
- publicRef := imageRef - publicRef.name = nil image := &storageImageDestination{ imageRef: imageRef, - publicRef: publicRef, directory: directory, blobDiffIDs: make(map[digest.Digest]digest.Digest), fileSizes: make(map[digest.Digest]int64), @@ -261,11 +295,10 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e return image, nil } -// Reference returns a mostly-usable image reference that can't return a DockerReference, to -// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to -// remove image names that they contain which don't match the value we're using. +// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, +// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (s storageImageDestination) Reference() types.ImageReference { - return s.publicRef + return s.imageRef } // Close cleans up the temporary directory. @@ -408,12 +441,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { case *manifest.Schema1: // Build a list of the diffIDs we've generated for the non-throwaway FS layers, // in reverse of the order in which they were originally listed. - for i, history := range m.History { - compat := manifest.Schema1V1Compatibility{} - if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil { - logrus.Debugf("internal error reading schema 1 history: %v", err) - return "" - } + for i, compat := range m.ExtractedV1Compatibility { if compat.ThrowAway { continue } @@ -471,8 +499,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { layerBlobs := man.LayerInfos() // Extract or find the layers. lastLayer := "" - addedLayers := []string{} for _, blob := range layerBlobs { + if blob.EmptyLayer { + continue + } + var diff io.ReadCloser // Check if there's already a layer with the ID that we'd give to the result of applying // this layer blob to its parent, if it has one, or the blob's hex value otherwise. @@ -481,7 +512,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), // or to even check if we had it. logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.HasBlob(ctx, blob) + has, _, err := s.HasBlob(ctx, blob.BlobInfo) if err != nil { return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) } @@ -550,7 +581,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) } lastLayer = layer.ID - addedLayers = append([]string{lastLayer}, addedLayers...) } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image // was originally created, in case we're just copying it. If not, no harm done. @@ -613,7 +643,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error { if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { names := []string{} if name != nil { - names = append(names, verboseName(name)) + names = append(names, name.String()) } if len(oldNames) > 0 { names = append(names, oldNames...) 
@@ -703,6 +733,13 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), +// and would prefer to receive an unmodified manifest instead of one modified for the destination. +// Does not make a difference if Reference().DockerReference() is nil. +func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { + return true // Yes, we want the unmodified manifest +} + // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { sizes := []int{} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index 5327169411..73306b972a 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -9,7 +9,6 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -17,55 +16,65 @@ import ( // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte // value hex-encoded into a 64-character string, and a reference to a Store // where an image is, or would be, kept. +// Either "named" or "id" must be set. type storageReference struct { transport storageTransport - reference string + named reference.Named // may include a tag and/or a digest id string - name reference.Named - tag string - digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { +func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { + if named == nil && id == "" { + return nil, ErrInvalidReference + } // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by // further calls to the original transport's SetStore() method. return &storageReference{ transport: transport, - reference: reference, + named: named, id: id, - name: name, - tag: tag, - digest: digest, + }, nil +} + +// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref +func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { + repo := ref.Name() + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if named.Name() == repo { + return true + } + } } + return false } // Resolve the reference's name to an image ID in the store, if there's already // one present with the same name or ID, and return the image. func (s *storageReference) resolveImage() (*storage.Image, error) { + var loadedImage *storage.Image if s.id == "" { // Look for an image that has the expanded reference name as an explicit Name value. 
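For orientation: IgnoresEmbeddedDockerReference lets the copy pipeline skip rewriting schema1 manifests for destinations, such as containers-storage above, that want the manifest byte-for-byte. The sketch below shows how a copier might consult such a capability; the interface and names are illustrative, not the vendored API.

package main

import "fmt"

// destination is a cut-down stand-in for the ImageDestination capability used above.
type destination interface {
	IgnoresEmbeddedDockerReference() bool
	Name() string
}

type verbatimDest struct{}

func (verbatimDest) IgnoresEmbeddedDockerReference() bool { return true }
func (verbatimDest) Name() string                         { return "containers-storage" }

// maybeRewriteEmbeddedReference mirrors the early return added to
// updateEmbeddedDockerReference in copy.go.
func maybeRewriteEmbeddedReference(d destination) {
	if d.IgnoresEmbeddedDockerReference() {
		fmt.Printf("%s keeps the manifest unmodified; skipping rewrite\n", d.Name())
		return
	}
	fmt.Printf("%s may need the embedded reference updated\n", d.Name())
}

func main() {
	maybeRewriteEmbeddedReference(verbatimDest{})
}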
- image, err := s.transport.store.Image(s.reference) + image, err := s.transport.store.Image(s.named.String()) if image != nil && err == nil { + loadedImage = image s.id = image.ID } } - if s.id == "" && s.name != nil && s.digest != "" { - // Look for an image with the specified digest that has the same name, - // though possibly with a different tag or digest, as a Name value, so - // that the canonical reference can be implicitly resolved to the image. - images, err := s.transport.store.ImagesByDigest(s.digest) - if images != nil && err == nil { - repo := reference.FamiliarName(reference.TrimNamed(s.name)) - search: - for _, image := range images { - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if reference.FamiliarName(reference.TrimNamed(named)) == repo { - s.id = image.ID - break search - } + if s.id == "" && s.named != nil { + if digested, ok := s.named.(reference.Digested); ok { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. + images, err := s.transport.store.ImagesByDigest(digested.Digest()) + if images != nil && err == nil { + for _, image := range images { + if imageMatchesRepo(image, s.named) { + loadedImage = image + s.id = image.ID + break } } } @@ -75,27 +84,20 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) } - img, err := s.transport.store.Image(s.id) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", s.id) - } - if s.name != nil { - repo := reference.FamiliarName(reference.TrimNamed(s.name)) - nameMatch := false - for _, name := range img.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if reference.FamiliarName(reference.TrimNamed(named)) == repo { - nameMatch = true - break - } - } + if loadedImage == nil { + img, err := s.transport.store.Image(s.id) + if err != nil { + return nil, errors.Wrapf(err, "error reading image %q", s.id) } - if !nameMatch { + loadedImage = img + } + if s.named != nil { + if !imageMatchesRepo(loadedImage, s.named) { logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) return nil, ErrNoSuchImage } } - return img, nil + return loadedImage, nil } // Return a Transport object that defaults to using the same store that we used @@ -110,20 +112,7 @@ func (s storageReference) Transport() types.ImageTransport { // Return a name with a tag or digest, if we have either, else return it bare. 
func (s storageReference) DockerReference() reference.Named { - if s.name == nil { - return nil - } - if s.tag != "" { - if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { - return namedTagged - } - } - if s.digest != "" { - if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { - return canonical - } - } - return s.name + return s.named } // Return a name with a tag, prefixed with the graph root and driver name, to @@ -135,25 +124,25 @@ func (s storageReference) StringWithinTransport() string { if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.reference == "" { - return storeSpec + "@" + s.id + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" + if s.named != nil { + res = res + s.named.String() } - if s.id == "" { - return storeSpec + s.reference + if s.id != "" { + res = res + "@" + s.id } - return storeSpec + s.reference + "@" + s.id + return res } func (s storageReference) PolicyConfigurationIdentity() string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - if s.name == nil { - return storeSpec + "@" + s.id + res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" + if s.named != nil { + res = res + s.named.String() } - if s.id == "" { - return storeSpec + s.reference + if s.id != "" { + res = res + "@" + s.id } - return storeSpec + s.reference + "@" + s.id + return res } // Also accept policy that's tied to the combination of the graph root and @@ -164,9 +153,17 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} - if s.name != nil { - name := reference.TrimNamed(s.name) - components := strings.Split(name.String(), "/") + if s.named != nil { + if s.id != "" { + // The reference without the ID is also a valid namespace. + namespaces = append(namespaces, storeSpec+s.named.String()) + } + tagged, isTagged := s.named.(reference.Tagged) + _, isDigested := s.named.(reference.Digested) + if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. + namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) + } + components := strings.Split(s.named.Name(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index f6ebcdc4a3..8886f92503 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -3,6 +3,7 @@ package storage import ( + "fmt" "path/filepath" "strings" @@ -105,7 +106,6 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // ParseStoreReference takes a name or an ID, tries to figure out which it is // relative to the given store, and returns it in a reference object. 
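For orientation: PolicyConfigurationNamespaces above now emits progressively broader scopes, from the full name (and, when present, name:tag) down to each repository path prefix, all prefixed with the store specifier. The simplified standalone sketch below reproduces that widening with a hypothetical store spec; the real code also handles the ID and digest variants.

package main

import (
	"fmt"
	"strings"
)

// namespacesFor lists policy scopes from most to least specific for a
// repository name such as "docker.io/library/busybox".
func namespacesFor(storeSpec, repo, tag string) []string {
	var out []string
	if tag != "" {
		out = append(out, storeSpec+repo+":"+tag)
	}
	components := strings.Split(repo, "/")
	for len(components) > 0 {
		out = append(out, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	return out
}

func main() {
	spec := "[overlay@/var/lib/containers/storage]" // illustrative store specifier
	for _, ns := range namespacesFor(spec, "docker.io/library/busybox", "latest") {
		fmt.Println(ns)
	}
}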
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { - var name reference.Named if ref == "" { return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference") } @@ -118,66 +118,36 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( ref = ref[closeIndex+1:] } - // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + // The reference may end with an image ID. Image IDs and digests use the same "@" separator; + // here we only peel away an image ID, and leave digests alone. split := strings.LastIndex(ref, "@") - idOrDigest := "" - if split != -1 { - // Peel off that last bit so that we can work on the rest. - idOrDigest = ref[split+1:] - if idOrDigest == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) - } - ref = ref[:split] - } - - // The middle segment (now the last segment), if there is one, is a digest. - split = strings.LastIndex(ref, "@") - sum := digest.Digest("") - if split != -1 { - sum = digest.Digest(ref[split+1:]) - if sum == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - ref = ref[:split] - } - - // If we have something that unambiguously should be a digest, validate it, and then the third part, - // if we have one, as an ID. id := "" - if sum != "" { - if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) - } - if err := sum.Validate(); err != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - id = idOrDigest - if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { - // The ID is a truncated version of the ID of an image that's present in local storage, - // so we might as well use the expanded value. - id = img.ID + if split != -1 { + possibleID := ref[split+1:] + if possibleID == "" { + return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) } - } else if idOrDigest != "" { - // There was no middle portion, so the final portion could be either a digest or an ID. - if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { - // It's an ID. - id = idOrDigest - } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { - // It's a digest. - sum = idSum - } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { - // It's a truncated version of the ID of an image that's present in local storage, - // and we may need the expanded value. - id = img.ID - } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + // If it looks like a digest, leave it alone for now. 
+ if _, err := digest.Parse(possibleID); err != nil { + // Otherwise… + if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { + id = possibleID // … it is a full ID + } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { + // … it is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) + } + // We have recognized an image ID; peel it off. + ref = ref[:split] } } - // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's // at least of what we guess is a reasonable minimum length, because we don't want a really short value // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. - if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { // It's a truncated version of the ID of an image that's present in local storage; // we need to expand it. @@ -186,53 +156,24 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) ( } } - // The initial portion is probably a name, possibly with a tag. + var named reference.Named + // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been + // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.. if ref != "" { var err error - if name, err = reference.ParseNormalizedNamed(ref); err != nil { + named, err = reference.ParseNormalizedNamed(ref) + if err != nil { return nil, errors.Wrapf(err, "error parsing named reference %q", ref) } - } - if name == nil && sum == "" && id == "" { - return nil, errors.Errorf("error parsing reference") + named = reference.TagNameOnly(named) } - // Construct a copy of the store spec. - optionsList := "" - options := store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - - // Convert the name back into a reference string, if we got a name. 
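For orientation: the new ParseStoreReference logic above only peels off a trailing "@suffix" when it does not parse as a digest, treating it instead as a (possibly truncated) image ID. Below is a rough standalone approximation of that test; the real code uses github.com/opencontainers/go-digest and consults the store for truncated IDs, and the minimum-prefix length used here is a placeholder.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var hexID = regexp.MustCompile(`^[0-9a-f]+$`)

// classifySuffix decides whether the text after the last "@" looks like a
// digest ("algo:hex") or like an image ID (a prefix of a 64-char hex string).
func classifySuffix(ref string) (kind, suffix string) {
	i := strings.LastIndex(ref, "@")
	if i == -1 {
		return "none", ""
	}
	suffix = ref[i+1:]
	switch {
	case strings.Contains(suffix, ":"):
		return "digest", suffix
	case hexID.MatchString(suffix) && len(suffix) >= 3 && len(suffix) <= 64:
		return "image ID (possibly truncated)", suffix
	default:
		return "invalid", suffix
	}
}

func main() {
	for _, ref := range []string{
		"busybox:latest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
		"busybox:latest@0123456789ab",
	} {
		kind, s := classifySuffix(ref)
		fmt.Printf("%-30s <- %s\n", kind, s)
	}
}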
- refname := "" - tag := "" - if name != nil { - if sum.Validate() == nil { - canonical, err := reference.WithDigest(name, sum) - if err != nil { - return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) - } - refname = verboseName(canonical) - } else { - name = reference.TagNameOnly(name) - tagged, ok := name.(reference.Tagged) - if !ok { - return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) - } - refname = verboseName(name) - tag = tagged.Tag() - } - } - if refname == "" { - logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id) - } else if id == "" { - logrus.Debugf("parsed reference to refname into %q", storeSpec+refname) - } else { - logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id) + result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) + if err != nil { + return nil, err } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil + logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) + return result, nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -252,7 +193,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) { } // ParseReference takes a name and a tag or digest and/or ID -// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", @@ -338,7 +279,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() if dref != nil { - if img, err := store.Image(verboseName(dref)); err == nil { + if img, err := store.Image(dref.String()); err == nil { return img, nil } } @@ -399,52 +340,29 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if scope == "" { return nil } - // But if there is anything left, it has to be a name, with or without - // a tag, with or without an ID, since we don't return namespace values - // that are just bare IDs. 
- scopeInfo := strings.SplitN(scope, "@", 2) - if len(scopeInfo) == 1 && scopeInfo[0] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { - return err + + fields := strings.SplitN(scope, "@", 3) + switch len(fields) { + case 1: // name only + case 2: // name:tag@ID or name[:tag]@digest + if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil { + if _, digestErr := digest.Parse(fields[1]); digestErr != nil { + return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) + } } - } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { + case 3: // name[:tag]@digest@ID + if _, err := digest.Parse(fields[1]); err != nil { return err } - _, err = digest.Parse("sha256:" + scopeInfo[1]) - if err != nil { + if _, err := digest.Parse("sha256:" + fields[2]); err != nil { return err } - } else { - return ErrInvalidReference + default: // Coverage: This should never happen + return errors.New("Internal error: unexpected number of fields form strings.SplitN") } + // As for field[0], if it is non-empty at all: + // FIXME? We could be verifying the various character set and length restrictions + // from docker/distribution/reference.regexp.go, but other than that there + // are few semantically invalid strings. return nil } - -func verboseName(r reference.Reference) string { - if r == nil { - return "" - } - named, isNamed := r.(reference.Named) - digested, isDigested := r.(reference.Digested) - tagged, isTagged := r.(reference.Tagged) - name := "" - tag := "" - sum := "" - if isNamed { - name = (reference.TrimNamed(named)).String() - } - if isTagged { - if tagged.Tag() != "" { - tag = ":" + tagged.Tag() - } - } - if isDigested { - if digested.Digest().Validate() == nil { - sum = "@" + digested.Digest().String() - } - } - return name + tag + sum -} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 2823a05c07..5d05b711a3 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -174,6 +174,11 @@ type ImageDestination interface { AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool + // IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), + // and would prefer to receive an unmodified manifest instead of one modified for the destination. + // Does not make a difference if Reference().DockerReference() is nil. + IgnoresEmbeddedDockerReference() bool + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. 
@@ -359,6 +364,8 @@ type SystemContext struct { OCIInsecureSkipTLSVerify bool // If not "", use a shared directory for storing blobs rather than within OCI layouts OCISharedBlobDirPath string + // Allow UnCompress image layer for OCI image layer + OCIAcceptUncompressedLayers bool // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index 123bde98ad..246c0096a3 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -40,3 +40,6 @@ github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/pquerna/ffjson master github.com/syndtr/gocapability master +github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175 +github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a +github.com/ulikunitz/xz v0.5.4 diff --git a/vendor/github.com/containers/storage/README.md b/vendor/github.com/containers/storage/README.md index 00a47fd985..f68cc55c37 100644 --- a/vendor/github.com/containers/storage/README.md +++ b/vendor/github.com/containers/storage/README.md @@ -41,3 +41,6 @@ memory and stored along with the library's own bookkeeping information. Additionally, the library can store one or more of what it calls *big data* for images and containers. This is a named chunk of larger data, which is only in memory when it is being read from or being written to its own disk file. + +**[Contributing](CONTRIBUTING.md)** +Information about contributing to this project. diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go index ec54a502e9..ebc1e99a0f 100644 --- a/vendor/github.com/containers/storage/containers.go +++ b/vendor/github.com/containers/storage/containers.go @@ -2,6 +2,7 @@ package storage import ( "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" @@ -278,7 +279,8 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat names = dedupeNames(names) for _, name := range names { if _, nameInUse := r.byname[name]; nameInUse { - return nil, ErrDuplicateName + return nil, errors.Wrapf(ErrDuplicateName, + fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID)) } } if err == nil { diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go index 40b912bb3c..6e83808d4f 100644 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ b/vendor/github.com/containers/storage/containers_ffjson.go @@ -1,5 +1,6 @@ // Code generated by ffjson . DO NOT EDIT. 
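For orientation: the containerStore.Create change above wraps ErrDuplicateName with the offending name and container ID so callers can still match the sentinel. The sketch below shows that pattern with github.com/pkg/errors (vendored in this repository); idiomatically the format arguments go straight to Wrapf rather than through Sprintf. The sentinel and map here are stand-ins.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// errDuplicateName plays the role of storage.ErrDuplicateName.
var errDuplicateName = errors.New("that name is already in use")

func create(name string, inUse map[string]string) error {
	if id, ok := inUse[name]; ok {
		// Wrapf keeps the sentinel reachable via errors.Cause while adding context.
		return errors.Wrapf(errDuplicateName,
			"the container name %q is already in use by %q; remove that container to reuse the name", name, id)
	}
	return nil
}

func main() {
	err := create("web", map[string]string{"web": "9f86d081884c"})
	fmt.Println(err)
	fmt.Println(errors.Cause(err) == errDuplicateName) // true
}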
-// source: ./containers.go +// source: containers.go +// package storage diff --git a/vendor/github.com/containers/storage/drivers/chown.go b/vendor/github.com/containers/storage/drivers/chown.go index c12e73b3bc..bcba12de96 100644 --- a/vendor/github.com/containers/storage/drivers/chown.go +++ b/vendor/github.com/containers/storage/drivers/chown.go @@ -6,7 +6,6 @@ import ( "fmt" "os" "path/filepath" - "syscall" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/reexec" @@ -56,47 +55,7 @@ func chownByMapsMain() { if err != nil { return fmt.Errorf("error walking to %q: %v", path, err) } - sysinfo := info.Sys() - if st, ok := sysinfo.(*syscall.Stat_t); ok { - // Map an on-disk UID/GID pair from host to container - // using the first map, then back to the host using the - // second map. Skip that first step if they're 0, to - // compensate for cases where a parent layer should - // have had a mapped value, but didn't. - uid, gid := int(st.Uid), int(st.Gid) - if toContainer != nil { - pair := idtools.IDPair{ - UID: uid, - GID: gid, - } - mappedUid, mappedGid, err := toContainer.ToContainer(pair) - if err != nil { - if (uid != 0) || (gid != 0) { - return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) - } - mappedUid, mappedGid = uid, gid - } - uid, gid = mappedUid, mappedGid - } - if toHost != nil { - pair := idtools.IDPair{ - UID: uid, - GID: gid, - } - mappedPair, err := toHost.ToHost(pair) - if err != nil { - return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) - } - uid, gid = mappedPair.UID, mappedPair.GID - } - if uid != int(st.Uid) || gid != int(st.Gid) { - // Make the change. - if err := syscall.Lchown(path, uid, gid); err != nil { - return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) - } - } - } - return nil + return platformLChown(path, info, toHost, toContainer) } if err := filepath.Walk(".", chown); err != nil { fmt.Fprintf(os.Stderr, "error during chown: %v", err) diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go new file mode 100644 index 0000000000..5454657ec9 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_unix.go @@ -0,0 +1,55 @@ +// +build !windows + +package graphdriver + +import ( + "fmt" + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + sysinfo := info.Sys() + if st, ok := sysinfo.(*syscall.Stat_t); ok { + // Map an on-disk UID/GID pair from host to container + // using the first map, then back to the host using the + // second map. Skip that first step if they're 0, to + // compensate for cases where a parent layer should + // have had a mapped value, but didn't. 
+ uid, gid := int(st.Uid), int(st.Gid) + if toContainer != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedUid, mappedGid, err := toContainer.ToContainer(pair) + if err != nil { + if (uid != 0) || (gid != 0) { + return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) + } + mappedUid, mappedGid = uid, gid + } + uid, gid = mappedUid, mappedGid + } + if toHost != nil { + pair := idtools.IDPair{ + UID: uid, + GID: gid, + } + mappedPair, err := toHost.ToHost(pair) + if err != nil { + return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) + } + uid, gid = mappedPair.UID, mappedPair.GID + } + if uid != int(st.Uid) || gid != int(st.Gid) { + // Make the change. + if err := syscall.Lchown(path, uid, gid); err != nil { + return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err) + } + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/drivers/chown_windows.go b/vendor/github.com/containers/storage/drivers/chown_windows.go new file mode 100644 index 0000000000..31bd5bb52d --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/chown_windows.go @@ -0,0 +1,14 @@ +// +build windows + +package graphdriver + +import ( + "os" + "syscall" + + "github.com/containers/storage/pkg/idtools" +) + +func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { + return &os.PathError{"lchown", path, syscall.EWINDOWS} +} diff --git a/vendor/github.com/containers/storage/drivers/chroot_windows.go b/vendor/github.com/containers/storage/drivers/chroot_windows.go index 1df0317898..f4dc22a961 100644 --- a/vendor/github.com/containers/storage/drivers/chroot_windows.go +++ b/vendor/github.com/containers/storage/drivers/chroot_windows.go @@ -1,7 +1,7 @@ package graphdriver import ( - "os" + "fmt" "syscall" ) diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go index 35869d9063..9c11a069c8 100644 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ b/vendor/github.com/containers/storage/drivers/fsdiff.go @@ -163,6 +163,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin start := time.Now().UTC() logrus.Debug("Start untar layer") if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + logrus.Errorf("Error while applying layer: %s", err) return } logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 6b5f6912f7..fbe36f4a46 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -24,6 +24,7 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" @@ -84,6 +85,9 @@ type overlayOptions struct { overrideKernelCheck bool imageStores []string quota quota.Quota + mountProgram string + ostreeRepo string + skipMountHome bool } // Driver contains information about the home directory and the list of active mounts that are created using this driver. 
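For orientation: platformLChown above maps each file's on-disk UID/GID from the host into the container namespace and back before calling lchown. Below is a self-contained sketch of the range-based translation idtools performs, simplified to a single mapping entry and one direction; the real IDMappings type handles multiple ranges and both directions.

package main

import "fmt"

// idMap mirrors an /etc/subuid-style entry: ContainerID..+Size maps to HostID..+Size.
type idMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// toContainer translates a host ID into the container's ID space.
func toContainer(hostID int, maps []idMap) (int, error) {
	for _, m := range maps {
		if hostID >= m.HostID && hostID < m.HostID+m.Size {
			return m.ContainerID + (hostID - m.HostID), nil
		}
	}
	return -1, fmt.Errorf("host ID %d has no mapping", hostID)
}

func main() {
	maps := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	id, err := toContainer(100001, maps)
	fmt.Println(id, err) // 1 <nil>
}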
@@ -98,6 +102,7 @@ type Driver struct { naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker + convert map[string]bool } var ( @@ -147,15 +152,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) - if err != nil { - os.Remove(filepath.Join(home, linkDir)) - os.Remove(home) - return nil, errors.Wrap(err, "kernel does not support overlay fs") + var supportsDType bool + if opts.mountProgram != "" { + supportsDType = true + } else { + supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + os.Remove(filepath.Join(home, linkDir)) + os.Remove(home) + return nil, errors.Wrap(err, "kernel does not support overlay fs") + } } - if err := mount.MakePrivate(home); err != nil { - return nil, err + if !opts.skipMountHome { + if err := mount.MakePrivate(home); err != nil { + return nil, err + } + } + + if opts.ostreeRepo != "" { + if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } } d := &Driver{ @@ -167,6 +185,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap supportsDType: supportsDType, locker: locker.New(), options: *opts, + convert: make(map[string]bool), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d) @@ -227,6 +246,25 @@ func parseOptions(options []string) (*overlayOptions, error) { } o.imageStores = append(o.imageStores, store) } + case ".mount_program", "overlay.mount_program", "overlay2.mount_program": + logrus.Debugf("overlay: mount_program=%s", val) + _, err := os.Stat(val) + if err != nil { + return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err) + } + o.mountProgram = val + case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo": + logrus.Debugf("overlay: ostree_repo=%s", val) + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing") + } + o.ostreeRepo = val + case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } default: return nil, fmt.Errorf("overlay: Unknown option %s", key) } @@ -236,6 +274,7 @@ func parseOptions(options []string) (*overlayOptions, error) { func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { // We can try to modprobe overlay first + exec.Command("modprobe", "overlay").Run() layerDir, err := ioutil.TempDir(home, "compat") @@ -380,6 +419,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } + + if d.options.ostreeRepo != "" { + d.convert[id] = true + } + return d.create(id, parent, opts) } @@ -547,6 +591,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { func (d *Driver) Remove(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) + + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + if d.options.ostreeRepo != "" { + ostree.DeleteOSTree(d.options.ostreeRepo, id) + } + dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { @@ -663,7 +713,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { // the page size. 
The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. - if len(mountData) > pageSize { + if len(mountData) > pageSize || d.options.mountProgram != "" { //FIXME: We need to figure out to get this to work with additional stores opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work")) mountData = label.FormatMountLabel(opts, mountLabel) @@ -671,8 +721,16 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } - mount = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) + if d.options.mountProgram != "" { + mount = func(source string, target string, mType string, flags uintptr, label string) error { + mountProgram := exec.Command(d.options.mountProgram, "-o", label, target) + mountProgram.Dir = d.home + return mountProgram.Run() + } + } else { + mount = func(source string, target string, mType string, flags uintptr, label string) error { + return mountFrom(d.home, source, target, mType, flags, label) + } } mountTarget = path.Join(id, "merged") } @@ -764,6 +822,13 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str return 0, err } + _, convert := d.convert[id] + if convert { + if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil { + return 0, err + } + } + return directory.Size(applyDir) } diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index cee55f8d16..9314cb8760 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -9,6 +9,7 @@ import ( "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/ostree" "github.com/containers/storage/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" ) @@ -42,6 +43,27 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap d.homes = append(d.homes, strings.Split(option[12:], ",")...) 
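For orientation: the mount_program option above hands mounting off to an external helper (for example a FUSE-based overlay implementation) instead of calling mount(2) directly, which also sidesteps the page-size limit on mount data. Below is a hedged os/exec sketch of invoking such a helper; the helper path and its flags are placeholders, not a documented CLI.

package main

import (
	"fmt"
	"os/exec"
)

// mountWithHelper runs an external mount program in workDir, passing the mount
// options via -o, the way the overlay driver does when mount_program is set.
func mountWithHelper(helper, workDir, options, target string) error {
	cmd := exec.Command(helper, "-o", options, target) // placeholder helper and flags
	cmd.Dir = workDir
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("%s failed: %v (%s)", helper, err, out)
	}
	return nil
}

func main() {
	// Illustrative only; this helper may not exist on a given machine.
	err := mountWithHelper("/usr/bin/fuse-overlayfs", "/var/lib/containers/storage/overlay",
		"lowerdir=l,upperdir=u,workdir=w", "merged")
	fmt.Println(err)
}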
continue } + if strings.HasPrefix(option, "vfs.ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[16:] + } + if strings.HasPrefix(option, ".ostree_repo=") { + if !ostree.OstreeSupport() { + return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing") + } + d.ostreeRepo = option[13:] + } + } + if d.ostreeRepo != "" { + rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) + if err != nil { + return nil, err + } + if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil { + return nil, err + } } return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil } @@ -53,6 +75,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap type Driver struct { homes []string idMappings *idtools.IDMappings + ostreeRepo string } func (d *Driver) String() string { @@ -77,11 +100,15 @@ func (d *Driver) Cleanup() error { // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) + return d.create(id, parent, opts, false) } // Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { + return d.create(id, parent, opts, true) +} + +func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) error { if opts != nil && len(opts.StorageOpt) != 0 { return fmt.Errorf("--storage-opt is not supported for vfs") } @@ -106,14 +133,23 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { label.SetFileLabel(dir, mountLabel) } - if parent == "" { - return nil + if parent != "" { + parentDir, err := d.Get(parent, "") + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := CopyWithTar(parentDir, dir); err != nil { + return err + } } - parentDir, err := d.Get(parent, "") - if err != nil { - return fmt.Errorf("%s: %s", parent, err) + + if ro && d.ostreeRepo != "" { + if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil { + return err + } } - return CopyWithTar(parentDir, dir) + return nil + } func (d *Driver) dir(id string) string { @@ -132,6 +168,10 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. 
func (d *Driver) Remove(id string) error { + if d.ostreeRepo != "" { + // Ignore errors, we don't want to fail if the ostree branch doesn't exist, + ostree.DeleteOSTree(d.ostreeRepo, id) + } return system.EnsureRemoveAll(d.dir(id)) } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index b750715bfe..1d84b0b6af 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -940,11 +940,6 @@ func (d *Driver) AdditionalImageStores() []string { return nil } -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} - // UpdateLayerIDMap changes ownerships in the layer's filesystem tree from // matching those in toContainer to matching those in toHost. func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index 76a11fb0cd..80fae6dcee 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -40,6 +40,11 @@ type Image struct { // same top layer. TopLayer string `json:"layer,omitempty"` + // MappedTopLayers are the IDs of alternate versions of the top layer + // which have the same contents and parent, and which differ from + // TopLayer only in which ID mappings they use. + MappedTopLayers []string `json:"mapped-layers,omitempty"` + // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. Metadata string `json:"metadata,omitempty"` @@ -126,16 +131,17 @@ type imageStore struct { func copyImage(i *Image) *Image { return &Image{ - ID: i.ID, - Digest: i.Digest, - Names: copyStringSlice(i.Names), - TopLayer: i.TopLayer, - Metadata: i.Metadata, - BigDataNames: copyStringSlice(i.BigDataNames), - BigDataSizes: copyStringInt64Map(i.BigDataSizes), - BigDataDigests: copyStringDigestMap(i.BigDataDigests), - Created: i.Created, - Flags: copyStringInterfaceMap(i.Flags), + ID: i.ID, + Digest: i.Digest, + Names: copyStringSlice(i.Names), + TopLayer: i.TopLayer, + MappedTopLayers: copyStringSlice(i.MappedTopLayers), + Metadata: i.Metadata, + BigDataNames: copyStringSlice(i.BigDataNames), + BigDataSizes: copyStringInt64Map(i.BigDataSizes), + BigDataDigests: copyStringDigestMap(i.BigDataDigests), + Created: i.Created, + Flags: copyStringInterfaceMap(i.Flags), } } @@ -362,6 +368,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c return image, err } +func (r *imageStore) addMappedTopLayer(id, layer string) error { + if image, ok := r.lookup(id); ok { + image.MappedTopLayers = append(image.MappedTopLayers, layer) + return r.Save() + } + return ErrImageUnknown +} + func (r *imageStore) Metadata(id string) (string, error) { if image, ok := r.lookup(id); ok { return image.Metadata, nil diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go index f91ee6d4f9..6b40ebd597 100644 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
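For orientation: the new MappedTopLayers field above is serialized as "mapped-layers" and omitted when empty, which is why the generated ffjson code below grows matching key handling. A tiny encoding/json illustration of the same tag behaviour, with stand-in field values:

package main

import (
	"encoding/json"
	"fmt"
)

type image struct {
	ID              string   `json:"id"`
	TopLayer        string   `json:"layer,omitempty"`
	MappedTopLayers []string `json:"mapped-layers,omitempty"`
}

func main() {
	with, _ := json.Marshal(image{ID: "img1", TopLayer: "l1", MappedTopLayers: []string{"l1-remapped"}})
	without, _ := json.Marshal(image{ID: "img2", TopLayer: "l2"})
	fmt.Println(string(with))    // includes "mapped-layers"
	fmt.Println(string(without)) // field omitted entirely
}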
-// source: ./images.go +// source: images.go package storage @@ -64,6 +64,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { fflib.WriteJsonString(buf, string(j.TopLayer)) buf.WriteByte(',') } + if len(j.MappedTopLayers) != 0 { + buf.WriteString(`"mapped-layers":`) + if j.MappedTopLayers != nil { + buf.WriteString(`[`) + for i, v := range j.MappedTopLayers { + if i != 0 { + buf.WriteString(`,`) + } + fflib.WriteJsonString(buf, string(v)) + } + buf.WriteString(`]`) + } else { + buf.WriteString(`null`) + } + buf.WriteByte(',') + } if len(j.Metadata) != 0 { buf.WriteString(`"metadata":`) fflib.WriteJsonString(buf, string(j.Metadata)) @@ -157,6 +173,8 @@ const ( ffjtImageTopLayer + ffjtImageMappedTopLayers + ffjtImageMetadata ffjtImageBigDataNames @@ -178,6 +196,8 @@ var ffjKeyImageNames = []byte("names") var ffjKeyImageTopLayer = []byte("layer") +var ffjKeyImageMappedTopLayers = []byte("mapped-layers") + var ffjKeyImageMetadata = []byte("metadata") var ffjKeyImageBigDataNames = []byte("big-data-names") @@ -311,7 +331,12 @@ mainparse: case 'm': - if bytes.Equal(ffjKeyImageMetadata, kn) { + if bytes.Equal(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + + } else if bytes.Equal(ffjKeyImageMetadata, kn) { currentKey = ffjtImageMetadata state = fflib.FFParse_want_colon goto mainparse @@ -363,6 +388,12 @@ mainparse: goto mainparse } + if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) { + currentKey = ffjtImageMappedTopLayers + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { currentKey = ffjtImageTopLayer state = fflib.FFParse_want_colon @@ -416,6 +447,9 @@ mainparse: case ffjtImageTopLayer: goto handle_TopLayer + case ffjtImageMappedTopLayers: + goto handle_MappedTopLayers + case ffjtImageMetadata: goto handle_Metadata @@ -600,6 +634,80 @@ handle_TopLayer: state = fflib.FFParse_after_value goto mainparse +handle_MappedTopLayers: + + /* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/ + + { + + { + if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) + } + } + + if tok == fflib.FFTok_null { + j.MappedTopLayers = nil + } else { + + j.MappedTopLayers = []string{} + + wantVal := true + + for { + + var tmpJMappedTopLayers string + + tok = fs.Scan() + if tok == fflib.FFTok_error { + goto tokerror + } + if tok == fflib.FFTok_right_brace { + break + } + + if tok == fflib.FFTok_comma { + if wantVal == true { + // TODO(pquerna): this isn't an ideal error message, this handles + // things like [,,,] as an array value. 
+ return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) + } + continue + } else { + wantVal = true + } + + /* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + tmpJMappedTopLayers = string(string(outBuf)) + + } + } + + j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers) + + wantVal = false + } + } + } + + state = fflib.FFParse_after_value + goto mainparse + handle_Metadata: /* handler: j.Metadata type=string kind=string quoted=false*/ diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go index f9006c7e63..4baa5f1862 100644 --- a/vendor/github.com/containers/storage/layers.go +++ b/vendor/github.com/containers/storage/layers.go @@ -211,7 +211,10 @@ type LayerStore interface { Mount(id, mountLabel string) (string, error) // Unmount unmounts a layer when it is no longer in use. - Unmount(id string) error + Unmount(id string, force bool) (bool, error) + + // Mounted returns whether or not the layer is mounted. + Mounted(id string) (bool, error) // ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint // for which the layer's UID and GID maps don't contain corresponding entries. @@ -624,6 +627,14 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil) } +func (r *layerStore) Mounted(id string) (bool, error) { + layer, ok := r.lookup(id) + if !ok { + return false, ErrLayerUnknown + } + return layer.MountCount > 0, nil +} + func (r *layerStore) Mount(id, mountLabel string) (string, error) { if !r.IsReadWrite() { return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) @@ -652,21 +663,24 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) { return mountpoint, err } -func (r *layerStore) Unmount(id string) error { +func (r *layerStore) Unmount(id string, force bool) (bool, error) { if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) + return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) } layer, ok := r.lookup(id) if !ok { layerByMount, ok := r.bymount[filepath.Clean(id)] if !ok { - return ErrLayerUnknown + return false, ErrLayerUnknown } layer = layerByMount } + if force { + layer.MountCount = 1 + } if layer.MountCount > 1 { layer.MountCount-- - return r.Save() + return true, r.Save() } err := r.driver.Put(id) if err == nil || os.IsNotExist(err) { @@ -675,9 +689,9 @@ func (r *layerStore) Unmount(id string) error { } layer.MountCount-- layer.MountPoint = "" - err = r.Save() + return false, r.Save() } - return err + return true, err } func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) { @@ -797,10 +811,8 @@ func (r *layerStore) Delete(id string) error { return ErrLayerUnknown } id = layer.ID - for layer.MountCount > 0 { - if err := r.Unmount(id); err != nil { - return err - } + if _, err := r.Unmount(id, true); err != nil { + return err } err := r.driver.Remove(id) if err == nil { @@ -917,7 +929,8 @@ func (s *simpleGetCloser) Get(path string) 
(io.ReadCloser, error) { } func (s *simpleGetCloser) Close() error { - return s.r.Unmount(s.id) + _, err := s.r.Unmount(s.id, false) + return err } func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index dcc5227feb..4c43826256 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -56,6 +56,11 @@ type ( // replaced with the matching name from this map. RebaseNames map[string]string InUserNS bool + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool } ) @@ -396,6 +401,11 @@ type tarAppender struct { // by the AUFS standard are used as the tar whiteout // standard. WhiteoutConverter tarWhiteoutConverter + // CopyPass indicates that the contents of any archive we're creating + // will instantly be extracted and written to disk, so we can deviate + // from the traditional behavior/format to get features like subsecond + // precision in timestamps. + CopyPass bool } func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { @@ -446,6 +456,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { return err } + if ta.CopyPass { + copyPassHeader(hdr) + } // if it's not a directory and has more than 1 link, // it's hard linked, so set the type flag accordingly @@ -710,6 +723,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) options.ChownOpts, ) ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData) + ta.CopyPass = options.CopyPass defer func() { // Make sure to check the error on Close. 
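Editor's note (illustrative sketch, not part of the patch): the new `CopyPass` field lets callers that tar a tree only to extract it again immediately (as `Archiver.TarUntar` now does) opt into the copy-pass header handling. Assuming the vendored `pkg/archive` import path below, the source path is hypothetical:

```go
package main

import (
	"io"
	"io/ioutil"
	"log"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Create an uncompressed tar stream of /some/src, flagging it as a
	// copy pass so copyPassHeader can switch headers to the PAX format
	// (keeping sub-second timestamps) on Go 1.10 and later.
	rc, err := archive.TarWithOptions("/some/src", &archive.TarOptions{
		Compression: archive.Uncompressed,
		CopyPass:    true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// The stream would normally be fed straight into Untar on the
	// destination; here it is simply drained.
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		log.Fatal(err)
	}
}
```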
@@ -1039,6 +1053,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error { UIDMaps: tarMappings.UIDs(), GIDMaps: tarMappings.GIDs(), Compression: Uncompressed, + CopyPass: true, } archive, err := TarWithOptions(src, options) if err != nil { @@ -1145,6 +1160,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { } hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + copyPassHeader(hdr) if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_110.go b/vendor/github.com/containers/storage/pkg/archive/archive_110.go new file mode 100644 index 0000000000..22b8b48cc1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_110.go @@ -0,0 +1,11 @@ +// +build go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { + hdr.Format = tar.FormatPAX +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_19.go b/vendor/github.com/containers/storage/pkg/archive/archive_19.go new file mode 100644 index 0000000000..d10d595fa8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/archive/archive_19.go @@ -0,0 +1,10 @@ +// +build !go1.10 + +package archive + +import ( + "archive/tar" +) + +func copyPassHeader(hdr *tar.Header) { +} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go index 211f4e92b5..9b8103e4da 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go @@ -1,5 +1,5 @@ // Code generated by ffjson . DO NOT EDIT. 
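Editor's note (illustrative, not part of the patch): the two build-tagged files above make `copyPassHeader` a no-op before Go 1.10 and set `tar.FormatPAX` from Go 1.10 on. The standalone sketch below shows the effect of that one-line change: a PAX header carries the full nanosecond modification time through a write/read round trip.

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"log"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	hdr := &tar.Header{
		Name:    "file.txt",
		Mode:    0644,
		Size:    0,
		ModTime: time.Unix(1500000000, 123456789), // sub-second component
	}
	hdr.Format = tar.FormatPAX // what copyPassHeader does on Go >= 1.10

	if err := tw.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	tr := tar.NewReader(&buf)
	got, err := tr.Next()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.ModTime.UnixNano()) // 1500000000123456789
}
```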
-// source: ./pkg/archive/archive.go +// source: pkg/archive/archive.go package archive @@ -491,6 +491,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { } else { buf.WriteString(`,"InUserNS":false`) } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } buf.WriteByte('}') return nil } @@ -524,6 +529,8 @@ const ( ffjtTarOptionsRebaseNames ffjtTarOptionsInUserNS + + ffjtTarOptionsCopyPass ) var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles") @@ -552,6 +559,8 @@ var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames") var ffjKeyTarOptionsInUserNS = []byte("InUserNS") +var ffjKeyTarOptionsCopyPass = []byte("CopyPass") + // UnmarshalJSON umarshall json - template of ffjson func (j *TarOptions) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) @@ -624,6 +633,11 @@ mainparse: currentKey = ffjtTarOptionsChownOpts state = fflib.FFParse_want_colon goto mainparse + + } else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse } case 'E': @@ -704,6 +718,12 @@ mainparse: } + if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) { + currentKey = ffjtTarOptionsCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) { currentKey = ffjtTarOptionsInUserNS state = fflib.FFParse_want_colon @@ -838,6 +858,9 @@ mainparse: case ffjtTarOptionsInUserNS: goto handle_InUserNS + case ffjtTarOptionsCopyPass: + goto handle_CopyPass + case ffjtTarOptionsnosuchkey: err = fs.SkipField(tok) if err != nil { @@ -1481,6 +1504,41 @@ handle_InUserNS: state = fflib.FFParse_after_value goto mainparse +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + wantedvalue: return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) wrongtokenerror: @@ -1773,6 +1831,11 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error { if err != nil { return err } + if j.CopyPass { + buf.WriteString(`,"CopyPass":true`) + } else { + buf.WriteString(`,"CopyPass":false`) + } buf.WriteByte('}') return nil } @@ -1792,6 +1855,8 @@ const ( ffjttarAppenderChownOpts ffjttarAppenderWhiteoutConverter + + ffjttarAppenderCopyPass ) var ffjKeytarAppenderTarWriter = []byte("TarWriter") @@ -1806,6 +1871,8 @@ var ffjKeytarAppenderChownOpts = []byte("ChownOpts") var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter") +var ffjKeytarAppenderCopyPass = []byte("CopyPass") + // UnmarshalJSON umarshall json - template of ffjson func (j *tarAppender) UnmarshalJSON(input []byte) error { fs := fflib.NewFFLexer(input) @@ -1881,6 +1948,11 @@ mainparse: currentKey = ffjttarAppenderChownOpts state = fflib.FFParse_want_colon goto mainparse + + } else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse } 
case 'I': @@ -1917,6 +1989,12 @@ mainparse: } + if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) { + currentKey = ffjttarAppenderCopyPass + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) { currentKey = ffjttarAppenderWhiteoutConverter state = fflib.FFParse_want_colon @@ -1988,6 +2066,9 @@ mainparse: case ffjttarAppenderWhiteoutConverter: goto handle_WhiteoutConverter + case ffjttarAppenderCopyPass: + goto handle_CopyPass + case ffjttarAppendernosuchkey: err = fs.SkipField(tok) if err != nil { @@ -2211,6 +2292,41 @@ handle_WhiteoutConverter: state = fflib.FFParse_after_value goto mainparse +handle_CopyPass: + + /* handler: j.CopyPass type=bool kind=bool quoted=false*/ + + { + if tok != fflib.FFTok_bool && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok)) + } + } + + { + if tok == fflib.FFTok_null { + + } else { + tmpb := fs.Output.Bytes() + + if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 { + + j.CopyPass = true + + } else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 { + + j.CopyPass = false + + } else { + err = errors.New("unexpected bytes for true/false value") + return fs.WrapErr(err) + } + + } + } + + state = fflib.FFParse_after_value + goto mainparse + wantedvalue: return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) wrongtokenerror: diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go index 397251bdb8..05522d68bb 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd solaris +// +build linux darwin freebsd solaris package directory diff --git a/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go new file mode 100644 index 0000000000..0a8e7d6796 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/no_ostree.go @@ -0,0 +1,19 @@ +// +build !ostree + +package ostree + +func OstreeSupport() bool { + return false +} + +func DeleteOSTree(repoLocation, id string) error { + return nil +} + +func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { + return nil +} + +func ConvertToOSTree(repoLocation, root, id string) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/ostree/ostree.go b/vendor/github.com/containers/storage/pkg/ostree/ostree.go new file mode 100644 index 0000000000..e9b57a0fb4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/ostree/ostree.go @@ -0,0 +1,198 @@ +// +build ostree + +package ostree + +import ( + "fmt" + "golang.org/x/sys/unix" + "os" + "path/filepath" + "runtime" + "syscall" + "time" + "unsafe" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/system" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/ostreedev/ostree-go/pkg/otbuiltin" + "github.com/pkg/errors" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +func OstreeSupport() bool { + return true +} + +func fixFiles(dir string, usermode bool) (bool, []string, error) { + var SkipOstree = errors.New("skip ostree deduplication") + + var whiteouts []string + + err := filepath.Walk(dir, 
func(path string, info os.FileInfo, err error) error { + if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + if !usermode { + stat, ok := info.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("not syscall.Stat_t") + } + + if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 { + whiteouts = append(whiteouts, path) + return nil + } + } + // Skip the ostree deduplication if we encounter a file type that + // ostree does not manage. + return SkipOstree + } + if info.IsDir() { + if usermode { + if err := os.Chmod(path, info.Mode()|0700); err != nil { + return err + } + } + } else if usermode && (info.Mode().IsRegular()) { + if err := os.Chmod(path, info.Mode()|0600); err != nil { + return err + } + } + return nil + }) + if err == SkipOstree { + return true, nil, nil + } + if err != nil { + return false, nil, err + } + return false, whiteouts, nil +} + +// Create prepares the filesystem for the OSTREE driver and copies the directory for the given id under the parent. +func ConvertToOSTree(repoLocation, root, id string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + repo, err := otbuiltin.OpenRepo(repoLocation) + if err != nil { + return errors.Wrap(err, "could not open the OSTree repository") + } + + skip, whiteouts, err := fixFiles(root, os.Getuid() != 0) + if err != nil { + return errors.Wrap(err, "could not prepare the OSTree directory") + } + if skip { + return nil + } + + if _, err := repo.PrepareTransaction(); err != nil { + return errors.Wrap(err, "could not prepare the OSTree transaction") + } + + if skip { + return nil + } + + commitOpts := otbuiltin.NewCommitOptions() + commitOpts.Timestamp = time.Now() + commitOpts.LinkCheckoutSpeedup = true + commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" + branch := fmt.Sprintf("containers-storage/%s", id) + + for _, w := range whiteouts { + if err := os.Remove(w); err != nil { + return errors.Wrap(err, "could not delete whiteout file") + } + } + + if _, err := repo.Commit(root, branch, commitOpts); err != nil { + return errors.Wrap(err, "could not commit the layer") + } + + if _, err := repo.CommitTransaction(); err != nil { + return errors.Wrap(err, "could not complete the OSTree transaction") + } + + if err := system.EnsureRemoveAll(root); err != nil { + return errors.Wrap(err, "could not delete layer") + } + + checkoutOpts := otbuiltin.NewCheckoutOptions() + checkoutOpts.RequireHardlinks = true + checkoutOpts.Whiteouts = false + if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil { + return errors.Wrap(err, "could not checkout from OSTree") + } + + for _, w := range whiteouts { + if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil { + return errors.Wrap(err, "could not recreate whiteout file") + } + } + return nil +} + +func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + _, err := os.Stat(repoLocation) + if err != nil && !os.IsNotExist(err) { + return err + } else if err != nil { + if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil { + return errors.Wrap(err, "could not create OSTree repository directory: %v") + } + + if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil { + return errors.Wrap(err, "could not create OSTree repository") + } + } + return nil +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer 
C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +func DeleteOSTree(repoLocation, id string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + repo, err := openRepo(repoLocation) + if err != nil { + return err + } + defer C.g_object_unref(C.gpointer(repo)) + + branch := fmt.Sprintf("containers-storage/%s", id) + + cbranch := C.CString(branch) + defer C.free(unsafe.Pointer(cbranch)) + + var cerr *C.GError + r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr))) + if !r { + return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/stringutils/README.md b/vendor/github.com/containers/storage/pkg/stringutils/README.md new file mode 100644 index 0000000000..b3e454573c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with strings diff --git a/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go new file mode 100644 index 0000000000..8c4c39875e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/stringutils/stringutils.go @@ -0,0 +1,99 @@ +// Package stringutils provides helper functions for dealing with strings. +package stringutils + +import ( + "bytes" + "math/rand" + "strings" +) + +// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. +func GenerateRandomAlphaOnlyString(n int) string { + // make a really long string + letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]byte, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +// GenerateRandomASCIIString generates an ASCII random string with length n. +func GenerateRandomASCIIString(n int) string { + chars := "abcdefghijklmnopqrstuvwxyz" + + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " + res := make([]byte, n) + for i := 0; i < n; i++ { + res[i] = chars[rand.Intn(len(chars))] + } + return string(res) +} + +// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...). +// For maxlen of 3 and lower, no ellipsis is appended. +func Ellipsis(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + if maxlen <= 3 { + return string(r[:maxlen]) + } + return string(r[:maxlen-3]) + "..." +} + +// Truncate truncates a string to maxlen. +func Truncate(s string, maxlen int) string { + r := []rune(s) + if len(r) <= maxlen { + return s + } + return string(r[:maxlen]) +} + +// InSlice tests whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and an open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// ShellQuoteArguments takes a list of strings and escapes them so they will be +// handled right when passed as arguments to a program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 6c63972682..d306360520 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -28,6 +28,20 @@ func (s StatT) Mtim() time.Time { return time.Time(s.mtim) } +// UID returns file's user id of owner. +// +// on windows this is always 0 because there is no concept of UID +func (s StatT) UID() uint32 { + return 0 +} + +// GID returns file's group id of owner. +// +// on windows this is always 0 because there is no concept of GID +func (s StatT) GID() uint32 { + return 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 088d9c0c5b..41b66a356f 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path/filepath" + "reflect" "strconv" "strings" "sync" @@ -18,9 +19,11 @@ import ( "github.com/BurntSushi/toml" drivers "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/directory" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/stringid" + "github.com/containers/storage/pkg/stringutils" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -259,8 +262,11 @@ type Store interface { Mount(id, mountLabel string) (string, error) // Unmount attempts to unmount a layer, image, or container, given an ID, a - // name, or a mount path. - Unmount(id string) error + // name, or a mount path. Returns whether or not the layer is still mounted. + Unmount(id string, force bool) (bool, error) + + // Unmount attempts to discover whether the specified id is mounted. + Mounted(id string) (bool, error) // Changes returns a summary of the changes which would need to be made // to one layer to make its contents the same as a second layer. If @@ -346,6 +352,9 @@ type Store interface { // with an image. SetImageBigData(id, key string, data []byte) error + // ImageSize computes the size of the image's layers and ancillary data. + ImageSize(id string) (int64, error) + // ListContainerBigData retrieves a list of the (possibly large) chunks of // named data associated with a container. ListContainerBigData(id string) ([]string, error) @@ -366,6 +375,10 @@ type Store interface { // associated with a container. SetContainerBigData(id, key string, data []byte) error + // ContainerSize computes the size of the container's layer and ancillary + // data. Warning: this is a potentially expensive operation. 
+ ContainerSize(id string) (int64, error) + // Layer returns a specific layer. Layer(id string) (*Layer, error) @@ -949,6 +962,106 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) } +func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, readWrite bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) { + layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool { + // If we want host mapping, and the layer uses mappings, it's not the best match. + if options.HostUIDMapping && len(layer.UIDMap) != 0 { + return false + } + if options.HostGIDMapping && len(layer.GIDMap) != 0 { + return false + } + // If we don't care about the mapping, it's fine. + if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 { + return true + } + // Compare the maps. + return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap) + } + var layer, parentLayer *Layer + var layerHomeStore ROLayerStore + // Locate the image's top layer and its parent, if it has one. + for _, store := range append([]ROLayerStore{rlstore}, lstores...) { + if store != rlstore { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + // Walk the top layer list. + for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + if cLayer, err := store.Get(candidate); err == nil { + // We want the layer's parent, too, if it has one. + var cParentLayer *Layer + if cLayer.Parent != "" { + // Its parent should be around here, somewhere. + if cParentLayer, err = store.Get(cLayer.Parent); err != nil { + // Nope, couldn't find it. We're not going to be able + // to diff this one properly. + continue + } + } + // If the layer matches the desired mappings, it's a perfect match, + // so we're actually done here. + if layerMatchesMappingOptions(cLayer, options) { + return cLayer, nil + } + // Record the first one that we found, even if it's not ideal, so that + // we have a starting point. + if layer == nil { + layer = cLayer + parentLayer = cParentLayer + layerHomeStore = store + } + } + } + } + if layer == nil { + return nil, ErrLayerUnknown + } + // The top layer's mappings don't match the ones we want, but it's in a read-only + // image store, so we can't create and add a mapped copy of the layer to the image. + if !readWrite { + return layer, nil + } + // The top layer's mappings don't match the ones we want, and it's in an image store + // that lets us edit image metadata... + if istore, ok := ristore.(*imageStore); ok { + // ... so extract the layer's contents, create a new copy of it with the + // desired mappings, and register it as an alternate top layer in the image. 
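Editor's note (illustrative, not part of the patch): the closure at the top of `imageTopLayerForMapping` decides whether an existing (possibly mapped) top layer can be reused for the requested ID mappings. The standalone predicate below restates that rule with hypothetical local types, purely to make the three cases explicit.

```go
package main

import (
	"fmt"
	"reflect"
)

// idMap and mappingOptions stand in for idtools.IDMap and storage.IDMappingOptions.
type idMap struct{ ContainerID, HostID, Size int }

type mappingOptions struct {
	HostUIDMapping, HostGIDMapping bool
	UIDMap, GIDMap                 []idMap
}

// layerMatches mirrors layerMatchesMappingOptions in store.go:
// 1. a host mapping cannot reuse a remapped layer,
// 2. "no preference" matches anything,
// 3. otherwise the maps must be identical.
func layerMatches(layerUID, layerGID []idMap, opts mappingOptions) bool {
	if opts.HostUIDMapping && len(layerUID) != 0 {
		return false
	}
	if opts.HostGIDMapping && len(layerGID) != 0 {
		return false
	}
	if len(opts.UIDMap) == 0 && len(opts.GIDMap) == 0 {
		return true
	}
	return reflect.DeepEqual(layerUID, opts.UIDMap) && reflect.DeepEqual(layerGID, opts.GIDMap)
}

func main() {
	remapped := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	fmt.Println(layerMatches(remapped, remapped, mappingOptions{HostUIDMapping: true, HostGIDMapping: true})) // false
	fmt.Println(layerMatches(remapped, remapped, mappingOptions{}))                                           // true (no preference)
	fmt.Println(layerMatches(remapped, remapped, mappingOptions{UIDMap: remapped, GIDMap: remapped}))         // true
}
```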
+ noCompression := archive.Uncompressed + diffOptions := DiffOptions{ + Compression: &noCompression, + } + rc, err := layerHomeStore.Diff("", layer.ID, &diffOptions) + if err != nil { + return nil, errors.Wrapf(err, "error reading layer %q to create an ID-mapped version of it") + } + defer rc.Close() + layerOptions := LayerOptions{ + IDMappingOptions: IDMappingOptions{ + HostUIDMapping: options.HostUIDMapping, + HostGIDMapping: options.HostGIDMapping, + UIDMap: copyIDMap(options.UIDMap), + GIDMap: copyIDMap(options.GIDMap), + }, + } + mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc) + if err != nil { + return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q") + } + if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil { + if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil { + err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2)) + } + return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID) + } + layer = mappedLayer + } + return layer, nil +} + func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { if options == nil { options = &ContainerOptions{} @@ -977,6 +1090,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat uidMap := options.UIDMap gidMap := options.GIDMap if image != "" { + var imageHomeStore ROImageStore istore, err := s.ImageStore() if err != nil { return nil, err @@ -994,6 +1108,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat } cimage, err = store.Get(image) if err == nil { + imageHomeStore = store break } } @@ -1006,22 +1121,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat if err != nil { return nil, err } - var ilayer *Layer - for _, store := range append([]ROLayerStore{rlstore}, lstores...) { - if store != rlstore { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - } - ilayer, err = store.Get(cimage.TopLayer) - if err == nil { - break - } - } - if ilayer == nil { - return nil, ErrLayerUnknown + ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, imageHomeStore == istore, rlstore, lstores, options.IDMappingOptions) + if err != nil { + return nil, err } imageTopLayer = ilayer if !options.HostUIDMapping && len(options.UIDMap) == 0 { @@ -1279,6 +1381,200 @@ func (s *store) SetImageBigData(id, key string, data []byte) error { return ristore.SetBigData(id, key, data) } +func (s *store) ImageSize(id string) (int64, error) { + var image *Image + + lstore, err := s.LayerStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary layer store data") + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional layer stores") + } + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + + var imageStore ROBigDataStore + istore, err := s.ImageStore() + if err != nil { + return -1, errors.Wrapf(err, "error loading primary image store data") + } + istores, err := s.ROImageStores() + if err != nil { + return -1, errors.Wrapf(err, "error loading additional image stores") + } + + // Look for the image's record. 
+ for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + if image, err = store.Get(id); err == nil { + imageStore = store + break + } + } + if image == nil { + return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id) + } + + // Start with a list of the image's top layers. + queue := make(map[string]struct{}) + for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) { + queue[layerID] = struct{}{} + } + visited := make(map[string]struct{}) + // Walk all of the layers. + var size int64 + for len(visited) < len(queue) { + for layerID := range queue { + // Visit each layer only once. + if _, ok := visited[layerID]; ok { + continue + } + visited[layerID] = struct{}{} + // Look for the layer and the store that knows about it. + var layerStore ROLayerStore + var layer *Layer + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + if layer, err = store.Get(layerID); err == nil { + layerStore = store + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID) + } + // The UncompressedSize is only valid if there's a digest to go with it. + n := layer.UncompressedSize + if layer.UncompressedDigest == "" { + // Compute the size. + n, err = layerStore.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID) + } + } + // Count this layer. + size += n + // Make a note to visit the layer's parent if we haven't already. + if layer.Parent != "" { + queue[layer.Parent] = struct{}{} + } + } + } + + // Count big data items. + names, err := imageStore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id) + } + for _, name := range names { + n, err := imageStore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id) + } + size += n + } + + return size, nil +} + +func (s *store) ContainerSize(id string) (int64, error) { + lstore, err := s.LayerStore() + if err != nil { + return -1, err + } + lstores, err := s.ROLayerStores() + if err != nil { + return -1, err + } + for _, store := range append([]ROLayerStore{lstore}, lstores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + } + + // Get the location of the container directory and container run directory. + // Do it before we lock the container store because they do, too. + cdir, err := s.ContainerDirectory(id) + if err != nil { + return -1, err + } + rdir, err := s.ContainerRunDirectory(id) + if err != nil { + return -1, err + } + + rcstore, err := s.ContainerStore() + if err != nil { + return -1, err + } + rcstore.Lock() + defer rcstore.Unlock() + if modified, err := rcstore.Modified(); modified || err != nil { + rcstore.Load() + } + + // Read the container record. + container, err := rcstore.Get(id) + if err != nil { + return -1, err + } + + // Read the container's layer's size. + var layer *Layer + var size int64 + for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ + if layer, err = store.Get(container.LayerID); err == nil { + size, err = store.DiffSize("", layer.ID) + if err != nil { + return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID) + } + break + } + } + if layer == nil { + return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID) + } + + // Count big data items. + names, err := rcstore.BigDataNames(id) + if err != nil { + return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID) + } + for _, name := range names { + n, err := rcstore.BigDataSize(id, name) + if err != nil { + return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id) + } + size += n + } + + // Count the size of our container directory and container run directory. + n, err := directory.Size(cdir) + if err != nil { + return -1, err + } + size += n + n, err = directory.Size(rdir) + if err != nil { + return -1, err + } + size += n + + return size, nil +} + func (s *store) ListContainerBigData(id string) ([]string, error) { rcstore, err := s.ContainerStore() if err != nil { @@ -1614,7 +1910,7 @@ func (s *store) DeleteLayer(id string) error { return err } for _, image := range images { - if image.TopLayer == id { + if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) { return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID) } } @@ -1697,10 +1993,13 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) childrenByParent[parent] = &([]string{layer.ID}) } } - anyImageByTopLayer := make(map[string]string) + otherImagesByTopLayer := make(map[string]string) for _, img := range images { if img.ID != id { - anyImageByTopLayer[img.TopLayer] = img.ID + otherImagesByTopLayer[img.TopLayer] = img.ID + for _, layerID := range img.MappedTopLayers { + otherImagesByTopLayer[layerID] = img.ID + } } } if commit { @@ -1714,27 +2013,38 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) if rcstore.Exists(layer) { break } - if _, ok := anyImageByTopLayer[layer]; ok { + if _, ok := otherImagesByTopLayer[layer]; ok { break } parent := "" if l, err := rlstore.Get(layer); err == nil { parent = l.Parent } - otherRefs := 0 - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - otherRefs++ + hasOtherRefs := func() bool { + layersToCheck := []string{layer} + if layer == image.TopLayer { + layersToCheck = append(layersToCheck, image.MappedTopLayers...) + } + for _, layer := range layersToCheck { + if childList, ok := childrenByParent[layer]; ok && childList != nil { + children := *childList + for _, child := range children { + if child != lastRemoved { + return true + } + } } } + return false } - if otherRefs != 0 { + if hasOtherRefs() { break } lastRemoved = layer layersToRemove = append(layersToRemove, lastRemoved) + if layer == image.TopLayer { + layersToRemove = append(layersToRemove, image.MappedTopLayers...) 
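Editor's note (illustrative sketch, not part of the patch): the new `Store.ImageSize` and `Store.ContainerSize` methods added above can be used as follows; the image and container names are hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
)

func main() {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Shutdown(false)

	// Sum of the image's layers (walking parents) plus its big-data items.
	imgSize, err := store.ImageSize("docker.io/library/busybox:latest") // hypothetical image name/ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("image size:", imgSize)

	// Container layer diff plus big data and the container/run directories.
	// Documented above as a potentially expensive operation.
	ctrSize, err := store.ContainerSize("mycontainer") // hypothetical container name/ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("container size:", ctrSize)
}
```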
+ } layer = parent } } else { @@ -1938,13 +2248,30 @@ func (s *store) Mount(id, mountLabel string) (string, error) { return "", ErrLayerUnknown } -func (s *store) Unmount(id string) error { +func (s *store) Mounted(id string) (bool, error) { if layerID, err := s.ContainerLayerID(id); err == nil { id = layerID } rlstore, err := s.LayerStore() if err != nil { - return err + return false, err + } + rlstore.Lock() + defer rlstore.Unlock() + if modified, err := rlstore.Modified(); modified || err != nil { + rlstore.Load() + } + + return rlstore.Mounted(id) +} + +func (s *store) Unmount(id string, force bool) (bool, error) { + if layerID, err := s.ContainerLayerID(id); err == nil { + id = layerID + } + rlstore, err := s.LayerStore() + if err != nil { + return false, err } rlstore.Lock() defer rlstore.Unlock() @@ -1952,9 +2279,9 @@ func (s *store) Unmount(id string) error { rlstore.Load() } if rlstore.Exists(id) { - return rlstore.Unmount(id) + return rlstore.Unmount(id, force) } - return ErrLayerUnknown + return false, ErrLayerUnknown } func (s *store) Changes(from, to string) ([]archive.Change, error) { @@ -2293,7 +2620,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return nil, err } for _, image := range imageList { - if image.TopLayer == layer.ID { + if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { images = append(images, &image) } } @@ -2504,7 +2831,7 @@ func (s *store) Shutdown(force bool) ([]string, error) { mounted = append(mounted, layer.ID) if force { for layer.MountCount > 0 { - err2 := rlstore.Unmount(layer.ID) + _, err2 := rlstore.Unmount(layer.ID, force) if err2 != nil { if err == nil { err = err2 @@ -2593,7 +2920,7 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} { return ret } -const configFile = "/etc/containers/storage.conf" +const defaultConfigFile = "/etc/containers/storage.conf" // ThinpoolOptionsConfig represents the "storage.options.thinpool" // TOML config table. @@ -2680,6 +3007,14 @@ type OptionsConfig struct { RemapGroup string `toml:"remap-group"` // Thinpool container options to be handed to thinpool drivers Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"` + // OSTree repository + OstreeRepo string `toml:"ostree_repo"` + + // Do not create a bind mount on the storage home + SkipMountHome string `toml:"skip_mount_home"` + + // Alternative program to use for the mount of the file system + MountProgram string `toml:"mount_program"` } // TOML-friendly explicit tables used for conversions. @@ -2692,11 +3027,9 @@ type tomlConfig struct { } `toml:"storage"` } -func init() { - DefaultStoreOptions.RunRoot = "/var/run/containers/storage" - DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" - DefaultStoreOptions.GraphDriverName = "" - +// ReloadConfigurationFile parses the specified configuration file and overrides +// the configuration in storeOptions. 
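Editor's note (illustrative sketch, not part of the patch): the `Unmount` signature change means callers now learn whether the layer is still mounted after the call, and `force` collapses the whole mount count in one go. A hypothetical cleanup helper built on the new `Mounted`/`Unmount` pair:

```go
package main

import (
	"fmt"
	"log"

	"github.com/containers/storage"
)

func cleanup(store storage.Store, id string) error {
	mounted, err := store.Mounted(id)
	if err != nil {
		return err
	}
	if !mounted {
		return nil
	}
	// force=true resets the mount count to 1 first, so a single call
	// really unmounts even if the layer was mounted several times.
	stillMounted, err := store.Unmount(id, true)
	if err != nil {
		return err
	}
	if stillMounted {
		return fmt.Errorf("layer %q is unexpectedly still mounted", id)
	}
	return nil
}

func main() {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		log.Fatal(err)
	}
	if err := cleanup(store, "mycontainer"); err != nil { // hypothetical container/layer ID
		log.Fatal(err)
	}
}
```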
+func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { data, err := ioutil.ReadFile(configFile) if err != nil { if !os.IsNotExist(err) { @@ -2712,66 +3045,75 @@ func init() { return } if config.Storage.Driver != "" { - DefaultStoreOptions.GraphDriverName = config.Storage.Driver + storeOptions.GraphDriverName = config.Storage.Driver } if config.Storage.RunRoot != "" { - DefaultStoreOptions.RunRoot = config.Storage.RunRoot + storeOptions.RunRoot = config.Storage.RunRoot } if config.Storage.GraphRoot != "" { - DefaultStoreOptions.GraphRoot = config.Storage.GraphRoot + storeOptions.GraphRoot = config.Storage.GraphRoot } if config.Storage.Options.Thinpool.AutoExtendPercent != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent)) } if config.Storage.Options.Thinpool.AutoExtendThreshold != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold)) } if config.Storage.Options.Thinpool.BaseSize != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize)) } if config.Storage.Options.Thinpool.BlockSize != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize)) } if config.Storage.Options.Thinpool.DirectLvmDevice != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice)) } if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce)) } if config.Storage.Options.Thinpool.Fs != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs)) } if config.Storage.Options.Thinpool.LogLevel != "" { - DefaultStoreOptions.GraphDriverOptions = 
append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel)) } if config.Storage.Options.Thinpool.MinFreeSpace != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace)) } if config.Storage.Options.Thinpool.MkfsArg != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg)) } if config.Storage.Options.Thinpool.MountOpt != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.mountopt=%s", config.Storage.Options.Thinpool.MountOpt)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mountopt=%s", config.Storage.Options.Thinpool.MountOpt)) } if config.Storage.Options.Thinpool.UseDeferredDeletion != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion)) } if config.Storage.Options.Thinpool.UseDeferredRemoval != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval)) } if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries)) } for _, s := range config.Storage.Options.AdditionalImageStores { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) } if config.Storage.Options.Size != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) + } + if config.Storage.Options.OstreeRepo != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", 
config.Storage.Driver, config.Storage.Options.OstreeRepo)) + } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } + if config.Storage.Options.MountProgram != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) } if config.Storage.Options.OverrideKernelCheck != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) } if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" { config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser @@ -2785,8 +3127,8 @@ func init() { fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err) return } - DefaultStoreOptions.UIDMap = mappings.UIDs() - DefaultStoreOptions.GIDMap = mappings.GIDs() + storeOptions.UIDMap = mappings.UIDs() + storeOptions.GIDMap = mappings.GIDs() } nonDigitsToWhitespace := func(r rune) rune { if strings.IndexRune("0123456789", r) == -1 { @@ -2836,15 +3178,23 @@ func init() { } return idmap } - DefaultStoreOptions.UIDMap = append(DefaultStoreOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...) - DefaultStoreOptions.GIDMap = append(DefaultStoreOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...) + storeOptions.UIDMap = append(storeOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...) + storeOptions.GIDMap = append(storeOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...) if os.Getenv("STORAGE_DRIVER") != "" { - DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") + storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") } if os.Getenv("STORAGE_OPTS") != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) 
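Editor's note (illustrative sketch, not part of the patch): because the parsing logic now lives in the exported `ReloadConfigurationFile`, callers can layer an alternate configuration file over the defaults. The file path and TOML contents below are hypothetical, chosen to exercise the new `ostree_repo`, `skip_mount_home`, and `mount_program` options.

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Start from the defaults that init() already loaded from
	// /etc/containers/storage.conf, then apply a per-user override.
	opts := storage.DefaultStoreOptions
	storage.ReloadConfigurationFile("/home/user/.config/containers/storage.conf", &opts)

	// A file like the following would exercise the new options:
	//
	//   [storage]
	//     driver = "overlay"
	//   [storage.options]
	//     ostree_repo = "/var/lib/containers/ostree"
	//     skip_mount_home = "true"
	//     mount_program = "/usr/bin/fuse-overlayfs"

	fmt.Println(opts.GraphDriverName, opts.GraphDriverOptions)
}
```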
} - if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" { - DefaultStoreOptions.GraphDriverOptions = nil + if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" { + storeOptions.GraphDriverOptions = nil } } + +func init() { + DefaultStoreOptions.RunRoot = "/var/run/containers/storage" + DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" + DefaultStoreOptions.GraphDriverName = "" + + ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions) +} diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 54c78ab1ca..c0498a02d1 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -20,3 +20,4 @@ github.com/tchap/go-patricia v2.2.6 github.com/vbatts/tar-split v0.10.2 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 +github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf index d0ebadf8b9..b67e03ade4 100644 --- a/vendor/github.com/docker/distribution/vendor.conf +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -1,5 +1,5 @@ -github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e -github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356 +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a @@ -19,6 +19,8 @@ github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef diff --git a/vendor/github.com/go-check/check/check.go b/vendor/github.com/go-check/check/check.go index 137a2749a8..de714c2712 100644 --- a/vendor/github.com/go-check/check/check.go +++ b/vendor/github.com/go-check/check/check.go @@ -239,7 +239,7 @@ func (c *C) logValue(label string, value interface{}) { } } -func (c *C) logMultiLine(s string) { +func formatMultiLine(s string, quote bool) []byte { b := make([]byte, 0, len(s)*2) i := 0 n := len(s) @@ -249,14 +249,23 @@ func (c *C) logMultiLine(s string) { j++ } b = append(b, "... "...) - b = strconv.AppendQuote(b, s[i:j]) - if j < n { + if quote { + b = strconv.AppendQuote(b, s[i:j]) + } else { + b = append(b, s[i:j]...) + b = bytes.TrimSpace(b) + } + if quote && j < n { b = append(b, " +"...) 
} b = append(b, '\n') i = j } - c.writeLog(b) + return b +} + +func (c *C) logMultiLine(s string) { + c.writeLog(formatMultiLine(s, true)) } func isMultiLine(s string) bool { diff --git a/vendor/github.com/go-check/check/checkers.go b/vendor/github.com/go-check/check/checkers.go index 3749545873..cdd7f9aeb2 100644 --- a/vendor/github.com/go-check/check/checkers.go +++ b/vendor/github.com/go-check/check/checkers.go @@ -4,6 +4,9 @@ import ( "fmt" "reflect" "regexp" + "strings" + + "github.com/kr/pretty" ) // ----------------------------------------------------------------------- @@ -90,6 +93,10 @@ func (checker *notChecker) Info() *CheckerInfo { func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { result, error = checker.sub.Check(params, names) result = !result + if result { + // clear error message if the new result is true + error = "" + } return } @@ -153,6 +160,56 @@ func (checker *notNilChecker) Check(params []interface{}, names []string) (resul // ----------------------------------------------------------------------- // Equals checker. +func diffworthy(a interface{}) bool { + t := reflect.TypeOf(a) + switch t.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct, reflect.String, reflect.Ptr: + return true + } + return false +} + +// formatUnequal will dump the actual and expected values into a textual +// representation and return an error message containing a diff. +func formatUnequal(obtained interface{}, expected interface{}) string { + // We do not do diffs for basic types because go-check already + // shows them very cleanly. + if !diffworthy(obtained) || !diffworthy(expected) { + return "" + } + + // Handle strings, short strings are ignored (go-check formats + // them very nicely already). We do multi-line strings by + // generating two string slices and using kr.Diff to compare + // those (kr.Diff does not do string diffs by itself). + aStr, aOK := obtained.(string) + bStr, bOK := expected.(string) + if aOK && bOK { + l1 := strings.Split(aStr, "\n") + l2 := strings.Split(bStr, "\n") + // the "2" here is a bit arbitrary + if len(l1) > 2 && len(l2) > 2 { + diff := pretty.Diff(l1, l2) + return fmt.Sprintf(`String difference: +%s`, formatMultiLine(strings.Join(diff, "\n"), false)) + } + // string too short + return "" + } + + // generic diff + diff := pretty.Diff(obtained, expected) + if len(diff) == 0 { + // No diff, this happens when e.g. just struct + // pointers are different but the structs have + // identical values. 
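Editor's note (illustrative, not part of the patch): with `formatUnequal` wired into the `Equals` and `DeepEquals` checkers, a failing assertion on diff-worthy values now includes a field-by-field difference in addition to the usual obtained/expected dump. A hypothetical test:

```go
package example

import (
	"testing"

	check "github.com/go-check/check"
)

func Test(t *testing.T) { check.TestingT(t) }

type S struct{}

var _ = check.Suite(&S{})

type config struct {
	Name    string
	Retries int
}

func (s *S) TestConfigsMatch(c *check.C) {
	got := config{Name: "db", Retries: 3}
	want := config{Name: "db", Retries: 5}
	// On failure, the checker now appends a pretty.Diff of the two
	// structs, e.g. "Retries: 3 != 5", to the failure output.
	c.Assert(got, check.DeepEquals, want)
}
```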
+ return "" + } + + return fmt.Sprintf(`Difference: +%s`, formatMultiLine(strings.Join(diff, "\n"), false)) +} + type equalsChecker struct { *CheckerInfo } @@ -175,7 +232,12 @@ func (checker *equalsChecker) Check(params []interface{}, names []string) (resul error = fmt.Sprint(v) } }() - return params[0] == params[1], "" + + result = params[0] == params[1] + if !result { + error = formatUnequal(params[0], params[1]) + } + return } // ----------------------------------------------------------------------- @@ -200,7 +262,11 @@ var DeepEquals Checker = &deepEqualsChecker{ } func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { - return reflect.DeepEqual(params[0], params[1]), "" + result = reflect.DeepEqual(params[0], params[1]) + if !result { + error = formatUnequal(params[0], params[1]) + } + return } // ----------------------------------------------------------------------- diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License new file mode 100644 index 0000000000..05c783ccf6 --- /dev/null +++ b/vendor/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme new file mode 100644 index 0000000000..c589fc622b --- /dev/null +++ b/vendor/github.com/kr/pretty/Readme @@ -0,0 +1,9 @@ +package pretty + + import "github.com/kr/pretty" + + Package pretty provides pretty-printing for Go values. + +Documentation + + http://godoc.org/github.com/kr/pretty diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go new file mode 100644 index 0000000000..6aa7f743a2 --- /dev/null +++ b/vendor/github.com/kr/pretty/diff.go @@ -0,0 +1,265 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (p *sbuf) Printf(format string, a ...interface{}) { + s := fmt.Sprintf(format, a...) + *p = append(*p, s) +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Pdiff((*sbuf)(&desc), a, b) + return desc +} + +// wprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type wprintfer struct{ w io.Writer } + +func (p *wprintfer) Printf(format string, a ...interface{}) { + fmt.Fprintf(p.w, format+"\n", a...) +} + +// Fdiff writes to w a description of the differences between a and b. 
+func Fdiff(w io.Writer, a, b interface{}) { + Pdiff(&wprintfer{w}, a, b) +} + +type Printfer interface { + Printf(format string, a ...interface{}) +} + +// Pdiff prints to p a description of the differences between a and b. +// It calls Printf once for each difference, with no trailing newline. +// The standard library log.Logger is a Printfer. +func Pdiff(p Printfer, a, b interface{}) { + diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type Logfer interface { + Logf(format string, a ...interface{}) +} + +// logprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type logprintfer struct{ l Logfer } + +func (p *logprintfer) Printf(format string, a ...interface{}) { + p.l.Logf(format, a...) +} + +// Ldiff prints to l a description of the differences between a and b. +// It calls Logf once for each difference, with no trailing newline. +// The standard library testing.T and testing.B are Logfers. +func Ldiff(l Logfer, a, b interface{}) { + Pdiff(&logprintfer{l}, a, b) +} + +type diffPrinter struct { + w Printfer + l string // label +} + +func (w diffPrinter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + w.w.Printf(l+f, a...) +} + +func (w diffPrinter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %# v", formatter{v: bv, quote: true}) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%# v != nil", formatter{v: av, quote: true}) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + switch kind := at.Kind(); kind { + case reflect.Bool: + if a, b := av.Bool(), bv.Bool(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a, b := av.Int(), bv.Int(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if a, b := av.Uint(), bv.Uint(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Float32, reflect.Float64: + if a, b := av.Float(), bv.Float(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Complex64, reflect.Complex128: + if a, b := av.Complex(), bv.Complex(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Array: + n := av.Len() + for i := 0; i < n; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + if a, b := av.Pointer(), bv.Pointer(); a != b { + w.printf("%#x != %#x", a, b) + } + case reflect.Interface: + w.diff(av.Elem(), bv.Elem()) + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %# v", formatter{v: bv, quote: true}) + case !av.IsNil() && bv.IsNil(): + w.printf("%# v != nil", formatter{v: av, quote: true}) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Slice: + lenA := av.Len() + lenB := bv.Len() + if lenA != lenB { + w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) 
+ break + } + for i := 0; i < lenA; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.String: + if a, b := av.String(), bv.String(); a != b { + w.printf("%q != %q", a, b) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + default: + panic("unknown reflect Kind: " + kind.String()) + } +} + +func (d diffPrinter) relabel(name string) (d1 diffPrinter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." + } + d1.l += name + return d1 +} + +// keyEqual compares a and b for equality. +// Both a and b must be valid map keys. +func keyEqual(av, bv reflect.Value) bool { + if !av.IsValid() && !bv.IsValid() { + return true + } + if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { + return false + } + switch kind := av.Kind(); kind { + case reflect.Bool: + a, b := av.Bool(), bv.Bool() + return a == b + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := av.Int(), bv.Int() + return a == b + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := av.Uint(), bv.Uint() + return a == b + case reflect.Float32, reflect.Float64: + a, b := av.Float(), bv.Float() + return a == b + case reflect.Complex64, reflect.Complex128: + a, b := av.Complex(), bv.Complex() + return a == b + case reflect.Array: + for i := 0; i < av.Len(); i++ { + if !keyEqual(av.Index(i), bv.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: + a, b := av.Pointer(), bv.Pointer() + return a == b + case reflect.Interface: + return keyEqual(av.Elem(), bv.Elem()) + case reflect.String: + a, b := av.String(), bv.String() + return a == b + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + if !keyEqual(av.Field(i), bv.Field(i)) { + return false + } + } + return true + default: + panic("invalid map key type " + av.Type().String()) + } +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if keyEqual(av, bv) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if keyEqual(av, bv) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go new file mode 100644 index 0000000000..a317d7b8ee --- /dev/null +++ b/vendor/github.com/kr/pretty/formatter.go @@ -0,0 +1,328 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +type formatter struct { + v reflect.Value + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
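For orientation, here is a minimal sketch of how the Formatter and diff helpers vendored above are typically driven from client code; the Config type and its values are illustrative and not part of the package:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/kr/pretty"
    )

    // Config is an illustrative type used only for this sketch.
    type Config struct {
    	Name  string
    	Ports []int
    }

    func main() {
    	a := Config{Name: "web", Ports: []int{80, 443}}
    	b := Config{Name: "web", Ports: []int{80, 8443}}

    	// "%# v" selects the pretty formatter; any other verb falls back to package fmt.
    	fmt.Printf("%# v\n", pretty.Formatter(a))

    	// Fdiff writes one line per difference between a and b to the writer.
    	pretty.Fdiff(os.Stdout, a, b)
    }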
+func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{v: reflect.ValueOf(x), quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.v.Interface()) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(i) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.v.Interface()) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} + p.printValue(fo.v, true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer + visited map[visit]int + depth int +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } +} + +// printValue must keep track of already-printed pointer values to avoid +// infinite recursion. +type visit struct { + v uintptr + typ reflect.Type +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + if p.depth > 10 { + io.WriteString(p, "!%v(DEPTH EXCEEDED)") + return + } + + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + showTypeInStruct := true + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if v.CanAddr() { + addr := v.UnsafeAddr() + vis := visit{addr, t} + if vd, ok := p.visited[vis]; ok && vd < p.depth { + p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) + break // don't print v again + } + p.visited[vis] = p.depth + } + + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') 
+ } + showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + pp := *p + pp.depth++ + pp.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + pp := *p + pp.depth++ + writeByte(pp, '&') + pp.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " {...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go new file mode 100644 index 0000000000..49423ec7f5 --- /dev/null +++ b/vendor/github.com/kr/pretty/pretty.go @@ -0,0 +1,108 @@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. 
+// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" + "reflect" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. +// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). +func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprint is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprint(x, y) is equivalent to +// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Sprint(a ...interface{}) string { + return fmt.Sprint(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) 
+} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{v: reflect.ValueOf(x), force: force} + } + return w +} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go new file mode 100644 index 0000000000..abb5b6fc14 --- /dev/null +++ b/vendor/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License new file mode 100644 index 0000000000..480a328059 --- /dev/null +++ b/vendor/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme new file mode 100644 index 0000000000..7e6e7c0687 --- /dev/null +++ b/vendor/github.com/kr/text/Readme @@ -0,0 +1,3 @@ +This is a Go package for manipulating paragraphs of text. + +See http://go.pkgdoc.org/github.com/kr/text for full documentation. diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go new file mode 100644 index 0000000000..cf4c198f95 --- /dev/null +++ b/vendor/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. 
+package text diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go new file mode 100644 index 0000000000..4ebac45c09 --- /dev/null +++ b/vendor/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. +func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. +func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. +func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go new file mode 100644 index 0000000000..b09bb03736 --- /dev/null +++ b/vendor/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. 
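As a quick illustration of the wrapping and indenting API described above, a minimal sketch (the input string and the line width are arbitrary):

    package main

    import (
    	"fmt"

    	"github.com/kr/text"
    )

    func main() {
    	s := "The quick brown fox jumps over the lazy dog."

    	// Wrap reflows s into lines of at most 16 bytes with minimal raggedness.
    	wrapped := text.Wrap(s, 16)
    	fmt.Println(wrapped)

    	// Indent prefixes each non-empty line of the wrapped text with "> ".
    	fmt.Println(text.Indent(wrapped, "> "))
    }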
+func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim || i == n-1 { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 5f124cd8bb..a4ae8901ac 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -3,13 +3,12 @@ package system import ( - "bufio" - "fmt" "os" "os/exec" "syscall" // only for exec "unsafe" + "github.com/opencontainers/runc/libcontainer/user" "golang.org/x/sys/unix" ) @@ -102,34 +101,43 @@ func Setctty() error { } // RunningInUserNS detects whether we are currently running in a user namespace. -// Copied from github.com/lxc/lxd/shared/util.go +// Originally copied from github.com/lxc/lxd/shared/util.go func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") + uidmap, err := user.CurrentProcessUIDMap() if err != nil { // This kernel-provided file only exists if user namespaces are supported return false } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } + return UIDMapInUserNS(uidmap) +} - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) +func UIDMapInUserNS(uidmap []user.IDMap) bool { /* * We assume we are in the initial user namespace if we have a full * range - 4294967295 uids starting at uid 0. 
*/ - if a == 0 && b == 0 && c == 4294967295 { + if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { return false } return true } +// GetParentNSeuid returns the euid within the parent user namespace +func GetParentNSeuid() int64 { + euid := int64(os.Geteuid()) + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return euid + } + for _, um := range uidmap { + if um.ID <= euid && euid <= um.ID+um.Count-1 { + return um.ParentID + euid - um.ID + } + } + return euid +} + // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go index e7cfd62b29..b94be74a66 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go @@ -2,8 +2,26 @@ package system +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + // RunningInUserNS is a stub for non-Linux systems // Always returns false func RunningInUserNS() bool { return false } + +// UIDMapInUserNS is a stub for non-Linux systems +// Always returns false +func UIDMapInUserNS(uidmap []user.IDMap) bool { + return false +} + +// GetParentNSeuid returns the euid within the parent user namespace +// Always returns os.Geteuid on non-linux +func GetParentNSeuid() int { + return os.Geteuid() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index c45e300411..c1e634c949 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -114,3 +114,29 @@ func CurrentUser() (User, error) { func CurrentGroup() (Group, error) { return LookupGid(unix.Getgid()) } + +func CurrentUserSubUIDs() ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + return ParseSubIDFileFilter("/etc/subuid", + func(entry SubID) bool { return entry.Name == u.Name }) +} + +func CurrentGroupSubGIDs() ([]SubID, error) { + g, err := CurrentGroup() + if err != nil { + return nil, err + } + return ParseSubIDFileFilter("/etc/subgid", + func(entry SubID) bool { return entry.Name == g.Name }) +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 93414516ca..7b912bbf8b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -75,12 +75,29 @@ func groupFromOS(g *user.Group) (Group, error) { return newGroup, nil } +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + func parseLine(line string, v ...interface{}) { - if line == "" { + parseParts(strings.Split(line, 
":"), v...) +} + +func parseParts(parts []string, v ...interface{}) { + if len(parts) == 0 { return } - parts := strings.Split(line, ":") for i, p := range parts { // Ignore cases where we don't have enough fields to populate the arguments. // Some configuration files like to misbehave. @@ -96,6 +113,8 @@ func parseLine(line string, v ...interface{}) { case *int: // "numbers", with conversion errors ignored because of some misbehaving configuration files. *e, _ = strconv.Atoi(p) + case *int64: + *e, _ = strconv.ParseInt(p, 10, 64) case *[]string: // Comma-separated lists. if p != "" { @@ -105,7 +124,7 @@ func parseLine(line string, v ...interface{}) { } default: // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e)) + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e)) } } } @@ -479,3 +498,111 @@ func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int } return GetAdditionalGroups(additionalGroups, group) } + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, fmt.Errorf("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, fmt.Errorf("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go index c008a387bf..f0a055b87e 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go +++ 
b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go @@ -87,9 +87,6 @@ func FormatMountLabel(src, mountLabel string) string { // SetProcessLabel takes a process label and tells the kernel to assign the // label to the next program executed by the current process. func SetProcessLabel(processLabel string) error { - if processLabel == "" { - return nil - } return selinux.SetExecLabel(processLabel) } @@ -133,7 +130,7 @@ func Relabel(path string, fileLabel string, shared bool) error { return nil } - exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true} + exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true, "/tmp": true, "/home": true, "/run": true, "/var": true, "/root": true} if exclude_paths[path] { return fmt.Errorf("SELinux relabeling of %s is not allowed", path) } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go similarity index 87% rename from vendor/github.com/opencontainers/selinux/go-selinux/selinux.go rename to vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 17ba2c5561..5dc09a51eb 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -1,4 +1,4 @@ -// +build linux +// +build selinux,linux package selinux @@ -7,6 +7,7 @@ import ( "bytes" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "io/ioutil" @@ -25,7 +26,8 @@ const ( // Permissive constant to indicate SELinux is in permissive mode Permissive = 0 // Disabled constant to indicate SELinux is disabled - Disabled = -1 + Disabled = -1 + selinuxDir = "/etc/selinux/" selinuxConfig = selinuxDir + "config" selinuxfsMount = "/sys/fs/selinux" @@ -46,7 +48,13 @@ type selinuxState struct { } var ( + // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. + ErrMCSAlreadyExists = errors.New("MCS label already exists") + // ErrEmptyPath is returned when an empty path has been specified. + ErrEmptyPath = errors.New("empty path") + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + roFileLabel string state = selinuxState{ mcsList: make(map[string]bool), } @@ -197,7 +205,7 @@ func GetEnabled() bool { return state.getEnabled() } -func readConfig(target string) (value string) { +func readConfig(target string) string { var ( val, key string bufin *bufio.Reader @@ -239,30 +247,42 @@ func readConfig(target string) (value string) { } func getSELinuxPolicyRoot() string { - return selinuxDir + readConfig(selinuxTypeTag) + return filepath.Join(selinuxDir, readConfig(selinuxTypeTag)) } -func readCon(name string) (string, error) { - var val string +func readCon(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } - in, err := os.Open(name) + in, err := os.Open(fpath) if err != nil { return "", err } defer in.Close() - _, err = fmt.Fscanf(in, "%s", &val) - return strings.Trim(val, "\x00"), err + var retval string + if _, err := fmt.Fscanf(in, "%s", &retval); err != nil { + return "", err + } + return strings.Trim(retval, "\x00"), nil } // SetFileLabel sets the SELinux label for this path or returns an error. 
-func SetFileLabel(path string, label string) error { - return lsetxattr(path, xattrNameSelinux, []byte(label), 0) +func SetFileLabel(fpath string, label string) error { + if fpath == "" { + return ErrEmptyPath + } + return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0) } // FileLabel returns the SELinux label for this path or returns an error. -func FileLabel(path string) (string, error) { - label, err := lgetxattr(path, xattrNameSelinux) +func FileLabel(fpath string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + + label, err := lgetxattr(fpath, xattrNameSelinux) if err != nil { return "", err } @@ -307,8 +327,12 @@ func ExecLabel() (string, error) { return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid())) } -func writeCon(name string, val string) error { - out, err := os.OpenFile(name, os.O_WRONLY, 0) +func writeCon(fpath string, val string) error { + if fpath == "" { + return ErrEmptyPath + } + + out, err := os.OpenFile(fpath, os.O_WRONLY, 0) if err != nil { return err } @@ -331,9 +355,11 @@ func CanonicalizeContext(val string) (string, error) { return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val) } -func readWriteCon(name string, val string) (string, error) { - var retval string - f, err := os.OpenFile(name, os.O_RDWR, 0) +func readWriteCon(fpath string, val string) (string, error) { + if fpath == "" { + return "", ErrEmptyPath + } + f, err := os.OpenFile(fpath, os.O_RDWR, 0) if err != nil { return "", err } @@ -344,8 +370,11 @@ func readWriteCon(name string, val string) (string, error) { return "", err } - _, err = fmt.Fscanf(f, "%s", &retval) - return strings.Trim(retval, "\x00"), err + var retval string + if _, err := fmt.Fscanf(f, "%s", &retval); err != nil { + return "", err + } + return strings.Trim(retval, "\x00"), nil } /* @@ -440,7 +469,7 @@ func mcsAdd(mcs string) error { state.Lock() defer state.Unlock() if state.mcsList[mcs] { - return fmt.Errorf("MCS Label already exists") + return ErrMCSAlreadyExists } state.mcsList[mcs] = true return nil @@ -516,10 +545,8 @@ func ReleaseLabel(label string) { } } -var roFileLabel string - // ROFileLabel returns the specified SELinux readonly file label -func ROFileLabel() (fileLabel string) { +func ROFileLabel() string { return roFileLabel } @@ -603,7 +630,7 @@ func SecurityCheckContext(val string) error { } /* -CopyLevel returns a label with the MLS/MCS level from src label replaces on +CopyLevel returns a label with the MLS/MCS level from src label replaced on the dest label. */ func CopyLevel(src, dest string) (string, error) { @@ -626,20 +653,26 @@ func CopyLevel(src, dest string) (string, error) { // Prevent users from relabing system files func badPrefix(fpath string) error { - var badprefixes = []string{"/usr"} + if fpath == "" { + return ErrEmptyPath + } - for _, prefix := range badprefixes { - if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) { + badPrefixes := []string{"/usr"} + for _, prefix := range badPrefixes { + if strings.HasPrefix(fpath, prefix) { return fmt.Errorf("relabeling content in %s is not allowed", prefix) } } return nil } -// Chcon changes the fpath file object to the SELinux label label. -// If the fpath is a directory and recurse is true Chcon will walk the -// directory tree setting the label +// Chcon changes the `fpath` file object to the SELinux label `label`. +// If `fpath` is a directory and `recurse`` is true, Chcon will walk the +// directory tree setting the label. 
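To show how the relabeling entry points above fit together, a hedged sketch of a caller; the path and SELinux context are placeholders, and under the !selinux stub build GetEnabled reports false and Chcon is a no-op:

    package main

    import (
    	"log"

    	selinux "github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
    	if !selinux.GetEnabled() {
    		return // SELinux is disabled, or the stub build is in use
    	}

    	// Recursively relabel a directory tree (illustrative path and label).
    	const fileLabel = "system_u:object_r:container_file_t:s0"
    	if err := selinux.Chcon("/srv/containerdata", fileLabel, true); err != nil {
    		log.Fatal(err)
    	}
    }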
func Chcon(fpath string, label string, recurse bool) error { + if fpath == "" { + return ErrEmptyPath + } if label == "" { return nil } @@ -658,7 +691,7 @@ func Chcon(fpath string, label string, recurse bool) error { } // DupSecOpt takes an SELinux process label and returns security options that -// can will set the SELinux Type and Level for future container processes +// can be used to set the SELinux Type and Level for future container processes. func DupSecOpt(src string) []string { if src == "" { return nil @@ -681,8 +714,8 @@ func DupSecOpt(src string) []string { return dup } -// DisableSecOpt returns a security opt that can be used to disabling SELinux -// labeling support for future container processes +// DisableSecOpt returns a security opt that can be used to disable SELinux +// labeling support for future container processes. func DisableSecOpt() []string { return []string{"disable"} } diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go new file mode 100644 index 0000000000..4dbfd83eda --- /dev/null +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -0,0 +1,188 @@ +// +build !selinux + +package selinux + +import ( + "errors" +) + +const ( + // Enforcing constant indicate SELinux is in enforcing mode + Enforcing = 1 + // Permissive constant to indicate SELinux is in permissive mode + Permissive = 0 + // Disabled constant to indicate SELinux is disabled + Disabled = -1 +) + +var ( + // ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS. + ErrMCSAlreadyExists = errors.New("MCS label already exists") + // ErrEmptyPath is returned when an empty path has been specified. + ErrEmptyPath = errors.New("empty path") +) + +// Context is a representation of the SELinux label broken into 4 parts +type Context map[string]string + +// SetDisabled disables selinux support for the package +func SetDisabled() { + return +} + +// GetEnabled returns whether selinux is currently enabled. +func GetEnabled() bool { + return false +} + +// SetFileLabel sets the SELinux label for this path or returns an error. +func SetFileLabel(fpath string, label string) error { + return nil +} + +// FileLabel returns the SELinux label for this path or returns an error. +func FileLabel(fpath string) (string, error) { + return "", nil +} + +/* +SetFSCreateLabel tells kernel the label to create all file system objects +created by this task. Setting label="" to return to default. +*/ +func SetFSCreateLabel(label string) error { + return nil +} + +/* +FSCreateLabel returns the default label the kernel which the kernel is using +for file system objects created by this task. "" indicates default. +*/ +func FSCreateLabel() (string, error) { + return "", nil +} + +// CurrentLabel returns the SELinux label of the current process thread, or an error. +func CurrentLabel() (string, error) { + return "", nil +} + +// PidLabel returns the SELinux label of the given pid, or an error. +func PidLabel(pid int) (string, error) { + return "", nil +} + +/* +ExecLabel returns the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. +*/ +func ExecLabel() (string, error) { + return "", nil +} + +/* +CanonicalizeContext takes a context string and writes it to the kernel +the function then returns the context that the kernel will use. 
This function +can be used to see if two contexts are equivalent +*/ +func CanonicalizeContext(val string) (string, error) { + return "", nil +} + +/* +SetExecLabel sets the SELinux label that the kernel will use for any programs +that are executed by the current process thread, or an error. +*/ +func SetExecLabel(label string) error { + return nil +} + +// Get returns the Context as a string +func (c Context) Get() string { + return "" +} + +// NewContext creates a new Context struct from the specified label +func NewContext(label string) Context { + c := make(Context) + return c +} + +// ReserveLabel reserves the MLS/MCS level component of the specified label +func ReserveLabel(label string) { + return +} + +// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled +func EnforceMode() int { + return Disabled +} + +/* +SetEnforceMode sets the current SELinux mode Enforcing, Permissive. +Disabled is not valid, since this needs to be set at boot time. +*/ +func SetEnforceMode(mode int) error { + return nil +} + +/* +DefaultEnforceMode returns the systems default SELinux mode Enforcing, +Permissive or Disabled. Note this is is just the default at boot time. +EnforceMode tells you the systems current mode. +*/ +func DefaultEnforceMode() int { + return Disabled +} + +/* +ReleaseLabel will unreserve the MLS/MCS Level field of the specified label. +Allowing it to be used by another process. +*/ +func ReleaseLabel(label string) { + return +} + +// ROFileLabel returns the specified SELinux readonly file label +func ROFileLabel() string { + return "" +} + +/* +ContainerLabels returns an allocated processLabel and fileLabel to be used for +container labeling by the calling process. +*/ +func ContainerLabels() (processLabel string, fileLabel string) { + return "", "" +} + +// SecurityCheckContext validates that the SELinux label is understood by the kernel +func SecurityCheckContext(val string) error { + return nil +} + +/* +CopyLevel returns a label with the MLS/MCS level from src label replaced on +the dest label. +*/ +func CopyLevel(src, dest string) (string, error) { + return "", nil +} + +// Chcon changes the `fpath` file object to the SELinux label `label`. +// If `fpath` is a directory and `recurse`` is true, Chcon will walk the +// directory tree setting the label. +func Chcon(fpath string, label string, recurse bool) error { + return nil +} + +// DupSecOpt takes an SELinux process label and returns security options that +// can be used to set the SELinux Type and Level for future container processes. +func DupSecOpt(src string) []string { + return nil +} + +// DisableSecOpt returns a security opt that can be used to disable SELinux +// labeling support for future container processes. +func DisableSecOpt() []string { + return []string{"disable"} +} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go index 7f2ef85049..67a9d8ee85 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go @@ -1,4 +1,4 @@ -// +build linux +// +build selinux,linux package selinux diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 0000000000..58ebdc162f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 0000000000..969ae7a00b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,71 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +# Using the API + +The following example program shows how to use the API. + + package main + + import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" + ) + + func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } + } + +# Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. 
+ + $ gxz -d bigfile.xz + diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go new file mode 100644 index 0000000000..fadc1a5944 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -0,0 +1,74 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "io" +) + +// putUint32LE puts the little-endian representation of x into the first +// four bytes of p. +func putUint32LE(p []byte, x uint32) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) +} + +// putUint64LE puts the little-endian representation of x into the first +// eight bytes of p. +func putUint64LE(p []byte, x uint64) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) + p[4] = byte(x >> 32) + p[5] = byte(x >> 40) + p[6] = byte(x >> 48) + p[7] = byte(x >> 56) +} + +// uint32LE converts a little endian representation to an uint32 value. +func uint32LE(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | + uint32(p[3])<<24 +} + +// putUvarint puts a uvarint representation of x into the byte slice. +func putUvarint(p []byte, x uint64) int { + i := 0 + for x >= 0x80 { + p[i] = byte(x) | 0x80 + x >>= 7 + i++ + } + p[i] = byte(x) + return i + 1 +} + +// errOverflow indicates an overflow of the 64-bit unsigned integer. +var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") + +// readUvarint reads a uvarint from the given byte reader. +func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + var s uint + i := 0 + for { + b, err := r.ReadByte() + if err != nil { + return x, i, err + } + i++ + if b < 0x80 { + if i > 10 || i == 10 && b > 1 { + return x, i, errOverflowU64 + } + return x | uint64(b)< 0 { + k = 4 - k + } + return k +} + +/*** Header ***/ + +// headerMagic stores the magic bytes for the header +var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} + +// HeaderLen provides the length of the xz file header. +const HeaderLen = 12 + +// Constants for the checksum methods supported by xz. +const ( + CRC32 byte = 0x1 + CRC64 = 0x4 + SHA256 = 0xa +) + +// errInvalidFlags indicates that flags are invalid. +var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. 
+var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. 
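As a small sketch of how the exported parts of this header handling (HeaderLen and ValidHeader) might be used to probe a file for an xz stream; the file name is purely illustrative:

    package main

    import (
    	"fmt"
    	"io"
    	"log"
    	"os"

    	"github.com/ulikunitz/xz"
    )

    func main() {
    	f, err := os.Open("archive.xz") // illustrative file name
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// An xz stream begins with a HeaderLen-byte file header; ValidHeader
    	// verifies the magic bytes, the flags and the CRC-32 described above.
    	buf := make([]byte, xz.HeaderLen)
    	if _, err := io.ReadFull(f, buf); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("looks like an xz stream:", xz.ValidHeader(buf))
    }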
+func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. 
+func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. +func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. 
+type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. +func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. 
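An index record, as read and marshalled above, is just two unsigned varints: the block's unpadded size followed by its uncompressed size. A standalone round trip with the standard library's varint routines (the vendored code uses its own putUvarint/readUvarint helpers; the sizes below are arbitrary sample values):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	unpadded, uncompressed := uint64(70000), uint64(262144)

	buf := make([]byte, 2*binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, unpadded)
	n += binary.PutUvarint(buf[n:], uncompressed)
	fmt.Printf("record encodes to %d bytes\n", n) // 6 bytes

	r := bytes.NewReader(buf[:n])
	u1, _ := binary.ReadUvarint(r)
	u2, _ := binary.ReadUvarint(r)
	fmt.Println(u1, u2) // 70000 262144
}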
+func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. +func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 0000000000..a32887872e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. 
+func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. +var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 
0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 0000000000..f99ec22068 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 0000000000..58635b113a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. 
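Both rollers in this package keep a hash over the last n bytes and update it in O(1) per input byte. A minimal standalone sketch of the cyclic-polynomial recurrence used by CyclicPoly above, with a trivial byte mapping standing in for the random 256-entry table; the window length, the multiplier inside mapByte, and the helper names are illustrative only. It checks each rolled value against a hash recomputed from scratch.

package main

import "fmt"

const n = 4 // window length

func ror(x uint64, s uint) uint64 { return x>>s | x<<(64-s) }

// mapByte stands in for the per-byte random table.
func mapByte(c byte) uint64 { return uint64(c)*0x9e3779b97f4a7c15 + 1 }

// fromScratch hashes one n-byte window directly.
func fromScratch(w []byte) uint64 {
	var h uint64
	for _, c := range w {
		h = ror(h, 1) ^ mapByte(c)
	}
	return h
}

func main() {
	data := []byte("abracadabra")
	var (
		h      uint64
		window []uint64 // mapped bytes of the current window
		i      int
	)
	for k, c := range data {
		y := mapByte(c)
		if len(window) < n {
			h = ror(h, 1) ^ y
			window = append(window, y)
		} else {
			h ^= ror(window[i], n-1) // drop the oldest byte's contribution
			h = ror(h, 1) ^ y        // rotate and mix in the new byte
			window[i] = y
			i = (i + 1) % n
		}
		if k >= n-1 {
			fmt.Println(h == fromScratch(data[k-n+1 : k+1])) // always true
		}
	}
}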
+const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. +func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 0000000000..ab6a19ca4c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 0000000000..0ba45e8ff3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. 
The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) 
+ if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) + } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) 
+ s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) 
+} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 0000000000..a781bd1953 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. 
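Stepping back to the xlog package above: every Output call carries the "noflag" bit of its category and is silently dropped when that bit is set on the logger, which is how Lstdflags disables Debug output by default. A standalone sketch of that flag test, with the constants copied to match the iota-derived values above; the suppressed helper is illustrative.

package main

import "fmt"

const (
	Ldate = 1 << iota
	Ltime
	Lmicroseconds
	Llongfile
	Lshortfile
	Lnopanic
	Lnofatal
	Lnowarn
	Lnoprint
	Lnodebug
	Lstdflags = Ldate | Ltime | Lnodebug
)

// suppressed reports whether a message of the given category would be dropped.
func suppressed(loggerFlags, noflag int) bool { return loggerFlags&noflag != 0 }

func main() {
	// With the default flags, Debug output is silenced but Warn is not.
	fmt.Println(suppressed(Lstdflags, Lnodebug)) // true
	fmt.Println(suppressed(Lstdflags, Lnowarn))  // false
}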
+type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. +const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. 
+ un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. +func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
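The tree keys produced by xval above are simply the next (up to) four bytes packed big-endian into a uint32, so numeric comparison of keys matches lexicographic comparison of the byte sequences. A quick standalone check with the standard library for the common full-4-byte case (xval additionally zero-pads shorter slices):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	a := []byte("abcd")
	b := []byte("abce")
	ka := binary.BigEndian.Uint32(a)
	kb := binary.BigEndian.Uint32(b)
	fmt.Printf("%#08x %#08x %v\n", ka, kb, ka < kb) // 0x61626364 0x61626365 true
}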
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 0000000000..e9bab01990 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 0000000000..5350d814fa --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 0000000000..50e0b6d57b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. 
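ntz32 above avoids a loop by isolating the lowest set bit and mapping it to its index with a de Bruijn multiply and a 32-entry table. A standalone copy of the same constant and table, cross-checked against math/bits.TrailingZeros32 for every bit position (the extra high bit in each test value just confirms that higher bits do not disturb the result):

package main

import (
	"fmt"
	"math/bits"
)

const ntz32Const = 0x04d7651f

var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

func ntz32(x uint32) int {
	if x == 0 {
		return 32
	}
	// Isolate the lowest set bit, then map it to its index via de Bruijn.
	x = (x & -x) * ntz32Const
	return int(ntz32Table[x>>27])
}

func main() {
	ok := true
	for i := uint(0); i < 32; i++ {
		x := uint32(1)<<i | uint32(1)<<31
		if ntz32(x) != bits.TrailingZeros32(x) {
			ok = false
		}
	}
	fmt.Println(ok) // true
}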
+func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. +func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. +func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 0000000000..a3696ba08b --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. The field N provides the number of remaining +// bytes. 
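The buffer type above distinguishes "empty" (front == rear) from "full" by never letting front catch up with rear, which is why newBuffer allocates size+1 bytes for a capacity of size. A standalone sketch of the same bookkeeping; the ring type is illustrative and uses % for wrapping where the vendored addIndex avoids the modulo.

package main

import "fmt"

type ring struct {
	data        []byte
	front, rear int
}

func newRing(size int) *ring { return &ring{data: make([]byte, size+1)} }

func (r *ring) buffered() int  { return (r.front - r.rear + len(r.data)) % len(r.data) }
func (r *ring) available() int { return len(r.data) - 1 - r.buffered() }

func (r *ring) writeByte(c byte) bool {
	if r.available() == 0 {
		return false // full: one slot always stays unused
	}
	r.data[r.front] = c
	r.front = (r.front + 1) % len(r.data)
	return true
}

func (r *ring) readByte() (byte, bool) {
	if r.buffered() == 0 {
		return 0, false
	}
	c := r.data[r.rear]
	r.rear = (r.rear + 1) % len(r.data)
	return c, true
}

func main() {
	r := newRing(3)
	for _, c := range []byte("abcd") {
		fmt.Print(r.writeByte(c), " ") // true true true false, capacity is 3
	}
	fmt.Println()
	for {
		c, ok := r.readByte()
		if !ok {
			break
		}
		fmt.Printf("%c", c) // abc
	}
	fmt.Println()
}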
+type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 0000000000..16e14db394 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
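A small usage sketch for LimitedByteWriter above, assuming the package is importable at its vendored path; bytes.Buffer satisfies io.ByteWriter, so it works as the underlying writer. Writes stop with ErrLimit once N bytes have been accepted.

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer
	w := &lzma.LimitedByteWriter{BW: &buf, N: 3}
	for _, c := range []byte("hello") {
		if err := w.WriteByte(c); err != nil {
			fmt.Println("stopped:", err) // stopped: limit reached
			break
		}
	}
	fmt.Println(buf.String()) // hel
}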
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 0000000000..564a12b834 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 0000000000..e08eb989ff --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
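writeMatch above copies length bytes starting dist bytes behind the dictionary head, and because the source region may overlap the bytes being written, short repeated patterns expand correctly. A simplified flat-slice sketch of the same overlapped copy (illustrative only, not the circular-buffer implementation):

// lzCopy appends length bytes taken dist bytes back from the end of out.
// Reading bytes that this very match has just written is what lets a
// two-byte pattern expand into "ababab...".
func lzCopy(out []byte, dist, length int) []byte {
    start := len(out) - dist
    for i := 0; i < length; i++ {
        out = append(out, out[start+i])
    }
    return out
}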
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 0000000000..b053a2dce2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
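The distance codec described above first encodes a six-bit position slot and then slot-dependent extra bits. A small standalone program, mirroring the slot construction used by distCodec.Encode (slots 0-3 are the distance offsets themselves, larger offsets use the top two bits), shows how distance offsets map to slots:

package main

import (
    "fmt"
    "math/bits"
)

// posSlot mirrors the slot computation in distCodec.Encode: twice the
// index of the highest set bit plus the next bit below it.
func posSlot(dist uint32) uint32 {
    if dist < 4 {
        return dist
    }
    b := uint32(31 - bits.LeadingZeros32(dist))
    return b<<1 | (dist>>(b-1))&1
}

func main() {
    for _, d := range []uint32{0, 3, 4, 7, 8, 1000, 1 << 20} {
        fmt.Printf("dist offset %7d -> slot %d\n", d, posSlot(d))
    }
}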
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 0000000000..18ce009928 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 10 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 0000000000..9d0fbc7033 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// addtional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. 
Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 0000000000..d786a9745d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. 
+var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. +type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. 
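putEntry above stores, for every hashed position, the backward delta to the previous position with the same hash, so older occurrences are found by walking a chain of deltas. A simplified, non-circular sketch of that lookup; the map and slice here are illustrative stand-ins for the real table and ring buffer:

// prevPositions walks a delta chain: head holds the most recent position
// for each hash value, delta[pos] holds the distance back to the previous
// position with the same hash, and a zero delta ends the chain.
func prevPositions(head map[uint64]int, delta []int, h uint64, max int) []int {
    var out []int
    pos, ok := head[h]
    if !ok {
        return out
    }
    for len(out) < max {
        out = append(out, pos)
        d := delta[pos]
        if d <= 0 || d > pos {
            break
        }
        pos -= d
    }
    return out
}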
+func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. +func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. 
+ break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 0000000000..bc708969fd --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. 
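marshalBinary above lays the classic LZMA header out as one properties byte, a little-endian 32-bit dictionary capacity and a little-endian 64-bit uncompressed size (all ones when the size is unknown). A small standalone program building the same 13 bytes by hand; the properties byte 0x5d (LC=3, LP=0, PB=2) is just the common default, used here for illustration:

package main

import "fmt"

// classicHeader builds the 13-byte header: props | dictCap (LE32) | size (LE64).
func classicHeader(props byte, dictCap uint32, size int64) []byte {
    h := make([]byte, 13)
    h[0] = props
    for i := uint(0); i < 4; i++ {
        h[1+i] = byte(dictCap >> (8 * i))
    }
    u := uint64(size)
    if size < 0 {
        u = ^uint64(0) // no explicit uncompressed size
    }
    for i := uint(0); i < 8; i++ {
        h[5+i] = byte(u >> (8 * i))
    }
    return h
}

func main() {
    fmt.Printf("%x\n", classicHeader(0x5d, 1<<23, -1))
}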
+func validDictCap(dictcap int) bool { + if int64(dictcap) == MaxDictCap { + return true + } + for n := uint(10); n < 32; n++ { + if dictcap == 1<= 10 or 2^32-1. If +// there is an explicit size it must not exceed 256 GiB. The length of +// the data argument must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 0000000000..ac6a71a5a9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. 
+func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
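For the compressed chunk types, UnmarshalBinary above reads the low five bits of the header byte as the high bits of the uncompressed-size field, two big-endian size fields, and optionally a properties code. A flat sketch of that field extraction (illustrative, not the exported API):

// splitChunkHeader extracts the fields of a compressed LZMA2 chunk header
// as laid out in chunkHeader.UnmarshalBinary: byte 0 carries the type bits
// plus the top bits of the uncompressed-size field, bytes 1-4 hold two
// big-endian 16-bit fields, byte 5 (when present) a properties code.
func splitChunkHeader(data []byte) (uncompressed uint32, compressed uint16, props byte) {
    uncompressed = uint32(data[0]&0x1f)<<16 |
        uint32(data[1])<<8 | uint32(data[2])
    compressed = uint16(data[3])<<8 | uint16(data[4])
    if len(data) > 5 {
        props = data[5]
    }
    return uncompressed, compressed, props
}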
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 0000000000..e517730924 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,129 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. The value is used to select the tree codec +// for length encoding and decoding. 
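DecodeDictCap and EncodeDictCap above convert between a dictionary capacity and the single code byte of the LZMA2 dictionary-capacity encoding: even codes are powers of two, odd codes insert the 1.5x steps in between. A short usage sketch of these exported functions:

package main

import (
    "fmt"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    // EncodeDictCap returns the code whose capacity is at least the
    // requested size; DecodeDictCap maps the code back to bytes.
    for _, n := range []int64{1 << 20, 8 << 20, 3 << 20} {
        c := lzma.EncodeDictCap(n)
        m, err := lzma.DecodeDictCap(c)
        fmt.Printf("want %8d  code %2d  capacity %8d  err %v\n", n, c, m, err)
    }
}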
+const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// lBits gives the number of bits used for the encoding of the l value +// provided to the range encoder. +func lBits(l uint32) int { + switch { + case l < 8: + return 4 + case l < 16: + return 5 + default: + return 10 + } +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +// +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 0000000000..c949d6ebd1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,132 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. 
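lengthCodec above splits the length offset l = length - 2 into three ranges selected by the two choice bits: 0-7 in the low trees, 8-15 in the mid trees, 16 and above in the single high tree. A tiny sketch of that split (names are illustrative):

// lengthTree reports which of the three trees encodes a match length and
// the symbol passed to it, mirroring lengthCodec.Encode above.
func lengthTree(length int) (tree string, symbol uint32) {
    l := uint32(length - 2) // length offset, minMatchLen == 2
    switch {
    case l < 8:
        return "low", l
    case l < 16:
        return "mid", l - 8
    default:
        return "high", l - 16 // lengths 18..273
    }
}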
+func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. +func (c *literalCodec) init(lc, lp int) { + switch { + case !(minLC <= lc && lc <= maxLC): + panic("lc out of range") + case !(minLP <= lp && lp <= maxLP): + panic("lp out of range") + } + c.probs = make([]prob, 0x300<= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + bit := (r >> 7) & 1 + r <<= 1 + i := ((1 + matchBit) << 8) | symbol + if err = probs[i].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit := (r >> 7) & 1 + r <<= 1 + if err = probs[symbol].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + } + return nil +} + +// Decode decodes a literal byte using the range decoder as well as the LZMA +// state, a match byte, and the literal state. +func (c *literalCodec) Decode(d *rangeDecoder, + state uint32, match byte, litState uint32, +) (s byte, err error) { + k := litState * 0x300 + probs := c.probs[k : k+0x300] + symbol := uint32(1) + if state >= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + i := ((1 + matchBit) << 8) | symbol + bit, err := d.DecodeBit(&probs[i]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit, err := d.DecodeBit(&probs[symbol]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + } + s = byte(symbol - 0x100) + return s, nil +} + +// minLC and maxLC define the range for LC values. +const ( + minLC = 0 + maxLC = 8 +) + +// minLC and maxLC define the range for LP values. +const ( + minLP = 0 + maxLP = 4 +) + +// minState and maxState define a range for the state values stored in +// the State values. +const ( + minState = 0 + maxState = 11 +) diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go new file mode 100644 index 0000000000..4a244eb1ac --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -0,0 +1,52 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// MatchAlgorithm identifies an algorithm to find matches in the +// dictionary. +type MatchAlgorithm byte + +// Supported matcher algorithms. +const ( + HashTable4 MatchAlgorithm = iota + BinaryTree +) + +// maStrings are used by the String method. +var maStrings = map[MatchAlgorithm]string{ + HashTable4: "HashTable4", + BinaryTree: "BinaryTree", +} + +// String returns a string representation of the Matcher. +func (a MatchAlgorithm) String() string { + if s, ok := maStrings[a]; ok { + return s + } + return "unknown" +} + +var errUnsupportedMatchAlgorithm = errors.New( + "lzma: unsupported match algorithm value") + +// verify checks whether the matcher value is supported. 
+func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 0000000000..733bb99da4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,80 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// verify checks whether the match is valid. If that is not the case an +// error is returned. +func (m match) verify() error { + if !(minDistance <= m.distance && m.distance <= maxDistance) { + return errors.New("distance out of range") + } + if !(1 <= m.n && m.n <= maxMatchLen) { + return errors.New("length out of range") + } + return nil +} + +// l return the l-value for the match, which is the difference of length +// n and 2. +func (m match) l() uint32 { + return uint32(m.n - minMatchLen) +} + +// dist returns the dist value for the match, which is one less of the +// distance stored in the match. +func (m match) dist() uint32 { + return uint32(m.distance - minDistance) +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 0000000000..24d50ec681 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. 
The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 0000000000..23418e25d2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 0000000000..6361c5e7c8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. 
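Properties packs LC, LP and PB into one byte as (PB*5+LP)*9+LC, so the familiar default LC=3, LP=0, PB=2 becomes 0x5d. A short usage sketch of the exported helpers:

package main

import (
    "fmt"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    p, err := lzma.PropertiesForCode(0x5d)
    if err != nil {
        panic(err)
    }
    fmt.Println(p.String())       // LC 3 LP 0 PB 2
    fmt.Println(p.Code() == 0x5d) // true: the encoding round-trips
}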
+type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
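EncodeBit and DecodeBit adapt the 11-bit probability after every coded bit: inc moves it 1/32 of the remaining way towards 2048, dec moves it 1/32 of the way towards 0. A standalone sketch of that update rule (names are illustrative):

// adapt applies the prob update used after coding one bit: towards
// 1<<11 when the bit was 0, towards 0 when the bit was 1.
func adapt(p uint16, bit uint32) uint16 {
    const (
        movebits = 5
        probbits = 11
    )
    if bit == 0 {
        return p + (1<<probbits-p)>>movebits
    }
    return p - p>>movebits
}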
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 0000000000..2ef3dcaaa9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. 
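A possible use of ReaderConfig for the classic format; the file name is only illustrative, and zero config fields are filled with defaults by Verify.

package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	f, err := os.Open("data.lzma") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Allow a dictionary capacity above the 8 MiB default; the reader
	// uses the larger of this value and the capacity from the header.
	r, err := lzma.ReaderConfig{DictCap: 64 * 1024 * 1024}.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}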
+type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + return nil, errors.New("lzma: dictionary capacity too small") + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. +func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 0000000000..a55cfaa4e3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,232 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState + ctype chunkType +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. 
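A sketch of reading a raw LZMA2 chunk sequence with Reader2, assuming the input already holds LZMA2 chunks (for example the payload of an xz block); the file name is illustrative.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func decodeLZMA2(chunks []byte) ([]byte, error) {
	r, err := lzma.NewReader2(bytes.NewReader(chunks))
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		return nil, err
	}
	// EOS reports whether an end-of-stream chunk terminated the data.
	log.Printf("end-of-stream chunk seen: %v", r.EOS())
	return out.Bytes(), nil
}

func main() {
	data, err := ioutil.ReadFile("chunks.lzma2") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	out, err := decodeLZMA2(data)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}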
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 0000000000..502351052f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. 
+func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 0000000000..504b3d78e4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. 
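The tree codecs above walk a probability slice indexed like a binary heap: the root is probs[1], each decoded bit picks a child via m = (m<<1)|b, and the final index is (1<<bits)+v. A standalone sketch that traces the indices visited for a 3-bit value, without touching the unexported types.

package main

import "fmt"

func treeIndices(v uint32, bits int) []uint32 {
	m := uint32(1)
	var visited []uint32
	for i := bits - 1; i >= 0; i-- {
		visited = append(visited, m) // probability slot consulted for this bit
		b := (v >> uint(i)) & 1
		m = (m << 1) | b
	}
	// After the loop m == (1<<bits)+v, which is what Decode subtracts.
	visited = append(visited, m)
	return visited
}

func main() {
	fmt.Println(treeIndices(5, 3)) // [1 3 6 13], and 13-8 == 5
}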
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := uint(0); j < uint(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + v |= b << j + } + return v, nil +} + +// probTree stores enough probability values to be used by the treeEncode and +// treeDecode methods of the range coder types. +type probTree struct { + probs []prob + bits byte +} + +// deepcopy initializes the probTree value as a deep copy of the source. +func (t *probTree) deepcopy(src *probTree) { + if t == src { + return + } + t.probs = make([]prob, len(src.probs)) + copy(t.probs, src.probs) + t.bits = src.bits +} + +// makeProbTree initializes a probTree structure. +func makeProbTree(bits int) probTree { + if !(1 <= bits && bits <= 32) { + panic("bits outside of range [1,32]") + } + t := probTree{ + bits: byte(bits), + probs: make([]prob, 1< 0 { + c.SizeInHeader = true + } + if !c.SizeInHeader { + c.EOSMarker = true + } +} + +// Verify checks WriterConfig for errors. Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. 
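A hedged round-trip sketch for the classic format using the exported Writer and Reader; with the zero WriterConfig no size is stored in the header, so the stream is terminated by an EOS marker.

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	const text = "The quick brown fox jumps over the lazy dog.\n"

	// Compress into an in-memory buffer.
	var buf bytes.Buffer
	w, err := lzma.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, text); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress and compare.
	r, err := lzma.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out) == text) // true
}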
+func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. +func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 0000000000..7c1afe1572 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. 
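A sketch of the LZMA2 chunk API described above: data is buffered per chunk, Flush only terminates the current chunk, and Close additionally appends the end-of-stream chunk, so Reader2.EOS reports true after reading everything back.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer

	w, err := lzma.NewWriter2(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, lzma2")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // writes the end-of-stream chunk
		log.Fatal(err)
	}

	r, err := lzma.NewReader2(&buf)
	if err != nil {
		log.Fatal(err)
	}
	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q eos=%v\n", data, r.EOS())
}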
+type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. +func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. 
+func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. The encoder will be reset +// to support the next chunk. +func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 0000000000..69cf5f7c27 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. +func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. 
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. +func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 0000000000..0634c6bcc0 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. 
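A typical use of the xz reader added by this change: stream an .xz file to stdout. The file name is only illustrative; the default configuration also accepts concatenated streams and stream padding.

package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	f, err := os.Open("archive.xz") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader reads and checks the header of the first stream.
	r, err := xz.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}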
+func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 0000000000..c126f70995 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,386 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 + CheckSum byte + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
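A sketch of a tuned WriterConfig; the xz.SHA256 check constant and the concrete sizes are assumptions chosen for illustration, and any zero field falls back to the defaults set by fill.

package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	out, err := os.Create("archive.xz") // hypothetical output
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	cfg := xz.WriterConfig{
		CheckSum:  xz.SHA256,        // assumed check constant; the default is CRC64
		BlockSize: 1 << 24,          // start a new block every 16 MiB
		DictCap:   16 * 1024 * 1024, // LZMA2 dictionary capacity
	}
	w, err := cfg.NewWriter(out)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, os.Stdin); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}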
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
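Because Close only writes the index and footer and does not close the underlying writer, both need to be closed, the xz.Writer first. A minimal sketch; the file name and data are illustrative.

package main

import (
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func writeXZ(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	w, err := xz.NewWriter(f)
	if err != nil {
		f.Close()
		return err
	}
	if _, err := w.Write(data); err != nil {
		w.Close()
		f.Close()
		return err
	}
	if err := w.Close(); err != nil { // writes the index and footer
		f.Close()
		return err
	}
	return f.Close()
}

func main() {
	if err := writeXZ("hello.xz", []byte("hello, xz\n")); err != nil {
		log.Fatal(err)
	}
}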
+func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + bw.mw = io.MultiWriter(bw.w, bw.hash) + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. 
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go index c6a07923b0..20ead1f73e 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -72,15 +72,15 @@ var ( // so library users can add custom formatters FormatCheckers = FormatCheckerChain{ formatters: map[string]FormatChecker{ - "date-time": DateTimeFormatChecker{}, - "hostname": HostnameFormatChecker{}, - "email": EmailFormatChecker{}, - "ipv4": IPV4FormatChecker{}, - "ipv6": IPV6FormatChecker{}, - "uri": URIFormatChecker{}, + "date-time": DateTimeFormatChecker{}, + "hostname": HostnameFormatChecker{}, + "email": EmailFormatChecker{}, + "ipv4": IPV4FormatChecker{}, + "ipv6": IPV6FormatChecker{}, + "uri": URIFormatChecker{}, "uri-reference": URIReferenceFormatChecker{}, - "uuid": UUIDFormatChecker{}, - "regex": RegexFormatChecker{}, + "uuid": UUIDFormatChecker{}, + "regex": RegexFormatChecker{}, }, } diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go index 2e1e8d6595..7896f31299 100644 --- a/vendor/github.com/xeipuuv/gojsonschema/result.go +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -165,6 +165,7 @@ func (v *Result) Valid() bool { func (v *Result) Errors() []ResultError { return v.errors } + // Add a fully filled error to the error set // SetDescription() will be called with the result of the parsed err.DescriptionFormat() func (v *Result) AddError(err ResultError, details ErrorDetails) { diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go index aceb1b7963..a79a8c13ae 100644 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -346,22 +346,25 @@ EachPacket: switch pkt := p.(type) { case *packet.UserId: + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. 
current = new(Identity) current.Name = pkt.Id current.UserId = pkt - e.Identities[pkt.Id] = current for { p, err = packets.Next() if err == io.EOF { - return nil, io.ErrUnexpectedEOF + break EachPacket } else if err != nil { return nil, err } sig, ok := p.(*packet.Signature) if !ok { - return nil, errors.StructuralError("user ID packet not followed by self-signature") + packets.Unread(p) + continue EachPacket } if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { @@ -369,9 +372,10 @@ EachPacket: return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error()) } current.SelfSignature = sig - break + e.Identities[pkt.Id] = current + } else { + current.Signatures = append(current.Signatures, sig) } - current.Signatures = append(current.Signatures, sig) } case *packet.Signature: if pkt.SigType == packet.SigTypeKeyRevocation { @@ -540,10 +544,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err return e, nil } -// SerializePrivate serializes an Entity, including private key material, to -// the given Writer. For now, it must only be used on an Entity returned from -// NewEntity. -// config is ignored +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { err = e.PrivateKey.Serialize(w) if err != nil { @@ -554,6 +558,10 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error if err != nil { return } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } err = ident.SelfSignature.Serialize(w) if err != nil { return @@ -564,6 +572,10 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error if err != nil { return } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } err = subkey.Sig.Serialize(w) if err != nil { return @@ -572,8 +584,8 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error return nil } -// Serialize writes the public part of the given Entity to w. (No private -// key material will be output). +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. func (e *Entity) Serialize(w io.Writer) error { err := e.PrimaryKey.Serialize(w) if err != nil { diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go index 8255fd49b4..e6cd0ced39 100644 --- a/vendor/golang.org/x/net/http/httpguts/guts.go +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -14,21 +14,6 @@ import ( "strings" ) -// SniffedContentType reports whether ct is a Content-Type that is known -// to cause client-side content sniffing. -// -// This provides just a partial implementation of mime.ParseMediaType -// with the assumption that the Content-Type is not attacker controlled. 
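A sketch of the x/crypto/openpgp behaviour changed above: SerializePrivate now re-signs identities and subkeys and accepts a nil config, and key parsing skips user IDs without a valid self-signature instead of aborting. The names and email are placeholders.

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp"
)

func main() {
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Serialize the private entity; self-signatures are recreated here.
	var buf bytes.Buffer
	if err := e.SerializePrivate(&buf, nil); err != nil {
		log.Fatal(err)
	}

	// Reading the keyring back keeps only identities whose self-signature
	// verified; others are now dropped rather than causing a parse error.
	es, err := openpgp.ReadKeyRing(&buf)
	if err != nil {
		log.Fatal(err)
	}
	for name := range es[0].Identities {
		fmt.Println("identity:", name)
	}
}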
-func SniffedContentType(ct string) bool { - if i := strings.Index(ct, ";"); i != -1 { - ct = ct[:i] - } - ct = strings.ToLower(strings.TrimSpace(ct)) - return ct == "text/plain" || ct == "application/octet-stream" || - ct == "application/unknown" || ct == "unknown/unknown" || ct == "*/*" || - !strings.Contains(ct, "/") -} - // ValidTrailerHeader reports whether name is a valid header field name to appear // in trailers. // See RFC 7230, Section 4.1.2 diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index bdf5652b01..f4d9b5ece3 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -52,9 +52,31 @@ const ( noDialOnMiss = false ) +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. + traceGetConn(req, addr) const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { @@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis } p.mu.Lock() for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } p.mu.Unlock() return cc, nil } @@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis p.mu.Unlock() return nil, ErrNoCachedConn } + traceGetConn(req, addr) call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index 088d6e2bdb..6356b32872 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) { // registerHTTPSProtocol calls Transport.RegisterProtocol but // converting panics into errors. 
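The GetConn tracing wired up above fires the hook from net/http/httptrace at most once per request. A sketch of attaching such a trace to a request over an HTTP/2-enabled transport; the URL is only illustrative.

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http.Transport{}
	if err := http2.ConfigureTransport(tr); err != nil {
		log.Fatal(err)
	}

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { log.Println("get conn:", hostPort) },
		GotConn: func(info httptrace.GotConnInfo) {
			log.Println("got conn, reused:", info.Reused)
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := (&http.Client{Transport: tr}).Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println("proto:", resp.Proto)
}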
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) @@ -69,10 +69,12 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) + res, err := rt.Transport.RoundTrip(req) if isNoCachedConnError(err) { return nil, http.ErrSkipAltProtocol } diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 957de25420..cea601fcdf 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -41,10 +41,10 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true } - f.n += n - return true + return false } diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 0000000000..9749dc0be5 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
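The flow.add rewrite above replaces the explicit headroom computation with a wrap-around test on the 32-bit sum: the addition is accepted only when the sign relationship between the sum and the operands shows no overflow past 2^31-1. A standalone sketch of the same test (addWindow is an illustrative helper, not part of the package):

package main

import (
	"fmt"
	"math"
)

// addWindow mirrors the overflow check in the flow.go hunk above: the update
// is applied only when the 32-bit sum did not wrap around.
func addWindow(window, n int32) (int32, bool) {
	sum := window + n
	if (sum > n) == (window > 0) {
		return sum, true
	}
	return window, false
}

func main() {
	w := int32(math.MaxInt32 - 10)
	if _, ok := addWindow(w, 100); !ok {
		fmt.Println("rejected: window would exceed 2^31-1") // expected
	}
	if w2, ok := addWindow(w, 5); ok {
		fmt.Println("accepted, new window:", w2)
	}
}
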
+ +// +build go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *clientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go index 47b7fae081..d957b7bc52 100644 --- a/vendor/golang.org/x/net/http2/go17.go +++ b/vendor/golang.org/x/net/http2/go17.go @@ -18,6 +18,8 @@ type contextContext interface { context.Context } +var errCanceled = context.Canceled + func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { ctx, cancel = context.WithCancel(context.Background()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) @@ -48,6 +50,14 @@ func (t *Transport) idleConnTimeout() time.Duration { func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + func traceGotConn(req *http.Request, cc *ClientConn) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { @@ -104,3 +114,8 @@ func requestTrace(req *http.Request) *clientTrace { func (cc *ClientConn) Ping(ctx context.Context) error { return cc.ping(ctx) } + +// Shutdown gracefully closes the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + return cc.shutdown(ctx) +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 0000000000..0df34e6d99 --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.11 + +package http2 + +import "net/textproto" + +func traceHasWroteHeaderField(trace *clientTrace) bool { return false } + +func traceWroteHeaderField(trace *clientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go index 140434a791..7ffb25046a 100644 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ b/vendor/golang.org/x/net/http2/not_go17.go @@ -8,6 +8,7 @@ package http2 import ( "crypto/tls" + "errors" "net" "net/http" "time" @@ -18,6 +19,8 @@ type contextContext interface { Err() error } +var errCanceled = errors.New("canceled") + type fakeContext struct{} func (fakeContext) Done() <-chan struct{} { return nil } @@ -34,6 +37,7 @@ func setResponseUncompressed(res *http.Response) { type clientTrace struct{} func requestTrace(*http.Request) *clientTrace { return nil } +func traceGetConn(*http.Request, string) {} func traceGotConn(*http.Request, *ClientConn) {} func traceFirstResponseByte(*clientTrace) {} func traceWroteHeaders(*clientTrace) {} @@ -84,4 +88,8 @@ func (cc *ClientConn) Ping(ctx contextContext) error { return cc.ping(ctx) } +func (cc *ClientConn) Shutdown(ctx contextContext) error { + return cc.shutdown(ctx) +} + func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index acf3b24186..6e2c5b92df 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -1575,6 +1575,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in + // "open" or "half-closed (local)" state, the recipient MUST respond with a + // stream error (Section 5.4.2) of type STREAM_CLOSED. + if state == stateClosed { + return streamError(id, ErrCodeStreamClosed) + } if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -1721,6 +1727,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // processing this frame. return nil } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. 
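The go111.go and not_go111.go files added above follow the pre-modules pattern for version-gated features: the real httptrace hooks compile only under the go1.11 build tag, and an inert stub with the inverse tag keeps older toolchains building. A sketch of the same two-file pattern applied to a hypothetical helper (file, package and function names are invented for illustration):

// ---- trace_go111.go (compiled only by Go 1.11 and newer) ----

// +build go1.11

package mypkg

import "net/http/httptrace"

// traceSupports1xx reports whether the caller's trace asks for 1xx callbacks,
// a field that only exists in the Go 1.11 httptrace API.
func traceSupports1xx(trace *httptrace.ClientTrace) bool {
	return trace != nil && trace.Got1xxResponse != nil
}

// ---- trace_not_go111.go (fallback stub for older toolchains) ----

// +build !go1.11

package mypkg

import "net/http/httptrace"

// On older Go versions the hook does not exist, so the answer is always no.
func traceSupports1xx(trace *httptrace.ClientTrace) bool { return false }
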
+ if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } return st.processTrailerHeaders(f) } @@ -2312,7 +2325,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true - var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") @@ -2326,7 +2338,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } - _, hasContentType := rws.snapHeader["Content-Type"] if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { if cto := rws.snapHeader.Get("X-Content-Type-Options"); strings.EqualFold("nosniff", cto) { @@ -2339,20 +2350,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { ctype = http.DetectContentType(p) } } - - var noSniff bool - if bodyAllowedForStatus(rws.status) && (rws.sentContentLen > 0 || len(p) > 0) { - // If the content type triggers client-side sniffing on old browsers, - // attach a X-Content-Type-Options header if not present (or explicitly nil). - if _, ok := rws.snapHeader["X-Content-Type-Options"]; !ok { - if hasContentType { - noSniff = httpguts.SniffedContentType(rws.snapHeader.Get("Content-Type")) - } else if ctype != "" { - noSniff = httpguts.SniffedContentType(ctype) - } - } - } - var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. @@ -2363,6 +2360,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { foreachHeaderElement(v, rws.declareTrailer) } + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". 
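With SniffedContentType gone and the automatic X-Content-Type-Options injection removed in the writeChunk hunk above, the HTTP/2 server only falls back to http.DetectContentType when the handler supplied neither a Content-Type nor a nosniff opt-out. Handlers that want deterministic behaviour set both headers explicitly; a small sketch (path and payload are placeholders):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) {
		// With the server-side sniffing heuristics removed (hunks above),
		// setting these two headers suppresses both client-side sniffing and
		// the automatic http.DetectContentType fallback.
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Write([]byte("raw bytes, not sniffed"))
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
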
+ if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, @@ -2371,7 +2381,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { endStream: endStream, contentType: ctype, contentLength: clen, - noSniff: noSniff, date: date, }) if err != nil { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index d23a226251..9d1f2fadd3 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -21,6 +21,7 @@ import ( mathrand "math/rand" "net" "net/http" + "net/textproto" "sort" "strconv" "strings" @@ -159,6 +160,7 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + closing bool closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received @@ -211,9 +213,10 @@ type clientStream struct { done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -237,6 +240,17 @@ func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { } } +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + // awaitRequestCancel waits for the user to cancel a request, its context to // expire, or for the request to be done (any way it might be removed from the // cc.streams map: peer reset, successful completion, TCP connection breakage, @@ -423,27 +437,36 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt if !canRetryError(err) { return nil, err } - if !afterBodyWrite { - return req, nil - } // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. if req.Body == nil || reqBodyIsNoBody(req.Body) { return req, nil } - // Otherwise we depend on the Request having its GetBody - // func defined. + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. 
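The hunk above keeps the HTTP/2 prohibition on hop-by-hop headers by deleting Connection from the response, but treats the value "close" as a request to start a graceful GOAWAY once the connection goes idle, mirroring HTTP/1 behaviour. A handler relying on that looks the same as it would under HTTP/1; the sketch below assumes cert.pem and key.pem exist so the server negotiates h2:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Under HTTP/2 the header itself is stripped before writing (it is
		// illegal on the wire), but per the hunk above the value "close"
		// still triggers a graceful GOAWAY when the connection is idle.
		w.Header().Set("Connection", "close")
		fmt.Fprintln(w, "served over", r.Proto)
	})
	// ListenAndServeTLS enables HTTP/2 by default; the cert paths are placeholders.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", handler))
}
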
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) + if getBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? + body, err := getBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil } - body, err := getBody() - if err != nil { - return nil, err + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil } - newReq := *req - newReq.Body = body - return &newReq, nil + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) } func canRetryError(err error) bool { @@ -630,12 +653,32 @@ func (cc *ClientConn) CanTakeNewRequest() bool { return cc.canTakeNewRequestLocked() } -func (cc *ClientConn) canTakeNewRequestLocked() bool { +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { - return false + return } - return cc.goAway == nil && !cc.closed && + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -665,6 +708,88 @@ func (cc *ClientConn) closeIfIdle() { cc.tconn.Close() } +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. 
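The retry rework above prefers Request.GetBody for replaying a request whose body may already have been partially written, and only reuses the original request when writing has not yet begun. Callers opt in simply by providing a resettable body; http.NewRequest does this automatically for in-memory readers, and GetBody can be set by hand otherwise (endpoint and payload below are placeholders):

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	// http.NewRequest fills in req.GetBody automatically for *bytes.Reader,
	// *bytes.Buffer and *strings.Reader bodies, which is what the retry
	// logic in the hunk above relies on to replay the body safely.
	req, err := http.NewRequest("POST", "https://example.com/api", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}

	// For custom body types, GetBody can be supplied by hand.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
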
+// Public implementation is in go17.go and not_go17.go +func (cc *ClientConn) shutdown(ctx contextContext) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -747,7 +872,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -1291,9 +1416,16 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail return nil, errRequestHeaderListSize } + trace := requestTrace(req) + traceHeaders := traceHasWroteHeaderField(trace) + // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { - cc.writeHeader(strings.ToLower(name), value) + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } }) return cc.hbuf.Bytes(), nil @@ -1615,8 +1747,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. +// frame (currently only used for 1xx responses). 
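ClientConn.Shutdown above is the graceful counterpart to Close: it sends a GOAWAY, refuses new requests via the new closing flag, and waits, bounded by the caller's context, for in-flight streams to drain. A sketch of driving it directly through Transport.NewClientConn, assuming the target host speaks h2 (the host is a placeholder):

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{}

	// Dial the server ourselves so we hold a *ClientConn we can shut down.
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{NextProtos: []string{"h2"}})
	if err != nil {
		log.Fatal(err)
	}
	cc, err := t.NewClientConn(conn)
	if err != nil {
		log.Fatal(err)
	}

	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	resp, err := cc.RoundTrip(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// Shutdown sends GOAWAY and waits (up to the context deadline) for any
	// in-flight streams to finish; Close would abort them immediately.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}
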
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { if f.Truncated { return nil, errResponseHeaderListSize @@ -1631,15 +1762,6 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - header := make(http.Header) res := &http.Response{ Proto: "HTTP/2.0", @@ -1664,6 +1786,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } } + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index a5120412e6..8a9711f6e4 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -186,7 +186,6 @@ type writeResHeaders struct { date string contentType string contentLength string - noSniff bool } func encKV(enc *hpack.Encoder, k, v string) { @@ -223,9 +222,6 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { if w.contentLength != "" { encKV(enc, "content-length", w.contentLength) } - if w.noSniff { - encKV(enc, "x-content-type-options", "nosniff") - } if w.date != "" { encKV(enc, "date", w.date) } diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go index 9158595366..d93e699b46 100644 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -75,7 +75,7 @@ const ( AuthMethodNotRequired AuthMethod = 0x00 // no authentication required AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password - AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authetication methods + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods StatusSucceeded Reply = 0x00 ) @@ -149,20 +149,13 @@ type Dialer struct { // See func Dial of the net package of standard library for a // description of the network and address parameters. 
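The handleResponse rework above stops special-casing 100 Continue and instead loops over informational responses, capping them at five and reporting each one through the Go 1.11 httptrace.Got1xxResponse hook. A client observes them like this (the URL is a placeholder):

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Got1xxResponse (Go 1.11+) is invoked once per informational response;
	// per the hunk above the transport gives up after five of them.
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			log.Printf("informational response: %d %v", code, header)
			return nil
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	log.Println("final status:", resp.Status)
}
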
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("network not implemented")} - } - switch d.cmd { - case CmdConnect, cmdBind: - default: + if err := d.validateTarget(network, address); err != nil { proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("command not implemented")} + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} } if ctx == nil { - ctx = context.Background() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} } var err error var c net.Conn @@ -185,11 +178,69 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net. return &Conn{Conn: c, boundAddr: a}, nil } +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + // Dial connects to the provided address on the provided network. // -// Deprecated: Use DialContext instead. +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
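The SOCKS dialer above now routes all target validation through validateTarget and gains DialWithConn for callers that already hold a connection to the proxy. The package is internal to golang.org/x/net, so external code normally reaches it through golang.org/x/net/proxy; a sketch assuming a local SOCKS5 server at 127.0.0.1:1080:

package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"golang.org/x/net/proxy"
)

func main() {
	// golang.org/x/net/proxy is the public wrapper around the internal socks
	// package touched above; the proxy address here is illustrative.
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}

	transport := &http.Transport{Dial: dialer.Dial}
	client := &http.Client{Transport: transport}

	resp, err := client.Get("http://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	log.Printf("%d bytes fetched via proxy", len(body))
}
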
func (d *Dialer) Dial(network, address string) (net.Conn, error) { - return d.DialContext(context.Background(), network, address) + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil } func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 0c58c7e1e5..9379ba9cef 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -14,7 +14,11 @@ var fcntl64Syscall uintptr = SYS_FCNTL // FcntlInt performs a fcntl syscall on fd with the provided command and argument. func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 24e96b1198..46523ced65 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -36,12 +36,3 @@ gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3 { return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9); } - -// Define the use function in C so that it is not inlined. 
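The FcntlInt fix above only converts the errno into an error when it is non-zero, so a successful call now returns a true nil instead of an error value wrapping Errno(0). Typical usage (the file path is illustrative):

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hostname") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// With the fix above, err is a real nil on success rather than a
	// non-nil error holding Errno(0).
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("file status flags: %#x", flags)
}
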
- -extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); - -void -use(void *p __attribute__ ((unused))) -{ -} diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 53fb851823..33c8b5f0db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -206,7 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_LINK: pp := (*RawSockaddrDatalink)(unsafe.Pointer(rsa)) @@ -286,7 +286,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { Close(nfd) return 0, nil, ECONNABORTED } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -306,7 +306,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { rsa.Addr.Family = AF_UNIX rsa.Addr.Len = SizeofSockaddrUnix } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) @@ -356,7 +356,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index b5072de285..e34abe29df 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -87,7 +87,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index ba9df4ac12..5561a3eb76 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -89,7 +89,7 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 04f38c53ee..690c2c87f0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -148,8 +148,6 @@ func Unlink(path string) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) -//sys utimes(path string, times *[2]Timeval) (err error) - func Utimes(path string, tv []Timeval) error { if tv == nil { err := utimensat(AT_FDCWD, path, nil, 0) @@ -207,20 +205,14 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) } -//sys futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) - func Futimesat(dirfd int, path string, tv []Timeval) error { - pathp, err := BytePtrFromString(path) - if err != nil { - return err - } if tv == nil { - return futimesat(dirfd, pathp, nil) + 
return futimesat(dirfd, path, nil) } if len(tv) != 2 { return EINVAL } - return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) + return futimesat(dirfd, path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } func Futimes(fd int, tv []Timeval) (err error) { @@ -497,6 +489,47 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrL2, nil } +// SockaddrRFCOMM implements the Sockaddr interface for AF_BLUETOOTH type sockets +// using the RFCOMM protocol. +// +// Server example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) +// +// Client example: +// +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) +type SockaddrRFCOMM struct { + // Addr represents a bluetooth address, byte ordering is little-endian. + Addr [6]uint8 + + // Channel is a designated bluetooth channel, only 1-30 are available for use. + // Since Linux 2.6.7 and further zero value is the first available channel. + Channel uint8 + + raw RawSockaddrRFCOMM +} + +func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_BLUETOOTH + sa.raw.Channel = sa.Channel + sa.raw.Bdaddr = sa.Addr + return unsafe.Pointer(&sa.raw), SizeofSockaddrRFCOMM, nil +} + // SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets. 
// The RxID and TxID fields are used for transport protocol addressing in // (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with @@ -659,7 +692,7 @@ func (sa *SockaddrVM) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrVM, nil } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa)) @@ -736,6 +769,30 @@ func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { Port: pp.Port, } return sa, nil + case AF_BLUETOOTH: + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + // only BTPROTO_L2CAP and BTPROTO_RFCOMM can accept connections + switch proto { + case BTPROTO_L2CAP: + pp := (*RawSockaddrL2)(unsafe.Pointer(rsa)) + sa := &SockaddrL2{ + PSM: pp.Psm, + CID: pp.Cid, + Addr: pp.Bdaddr, + AddrType: pp.Bdaddr_type, + } + return sa, nil + case BTPROTO_RFCOMM: + pp := (*RawSockaddrRFCOMM)(unsafe.Pointer(rsa)) + sa := &SockaddrRFCOMM{ + Channel: pp.Channel, + Addr: pp.Bdaddr, + } + return sa, nil + } } return nil, EAFNOSUPPORT } @@ -747,7 +804,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -765,7 +822,7 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { if len > SizeofSockaddrAny { panic("RawSockaddrAny too small") } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -779,7 +836,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { @@ -968,7 +1025,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from recvflags = int(msg.Flags) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } @@ -1221,12 +1278,10 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Dup(oldfd int) (fd int, err error) //sys Dup3(oldfd int, newfd int, flags int) (err error) -//sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) //sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD2 //sys Exit(code int) = SYS_EXIT_GROUP -//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) @@ -1306,7 +1361,6 @@ func Setgid(uid int) (err error) { //sysnb Uname(buf *Utsname) (err error) //sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2 //sys Unshare(flags int) (err error) -//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ @@ -1356,6 +1410,17 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, 
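Passing the fd into anyToSockaddr, as the hunks above do, lets the Linux implementation query SO_PROTOCOL and return a typed *SockaddrL2 or *SockaddrRFCOMM for accepted Bluetooth sockets. On the caller's side this shows up as an extra case in the usual type switch over the Sockaddr from Accept; a server-side sketch (requires a Bluetooth adapter and appropriate privileges, addresses are placeholders):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Listening socket setup mirrors the RFCOMM example embedded in the hunk above.
	fd, err := unix.Socket(unix.AF_BLUETOOTH, unix.SOCK_STREAM, unix.BTPROTO_RFCOMM)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	if err := unix.Bind(fd, &unix.SockaddrRFCOMM{Channel: 1}); err != nil {
		log.Fatal(err)
	}
	if err := unix.Listen(fd, 1); err != nil {
		log.Fatal(err)
	}

	nfd, sa, err := unix.Accept(fd)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(nfd)

	// Because anyToSockaddr now receives the fd, it can look up SO_PROTOCOL
	// and hand back a typed address for Bluetooth sockets.
	switch addr := sa.(type) {
	case *unix.SockaddrRFCOMM:
		log.Printf("RFCOMM peer %v on channel %d", addr.Addr, addr.Channel)
	default:
		log.Printf("peer address: %#v", sa)
	}
}
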
error) { return int(n), nil } +//sys faccessat(dirfd int, path string, mode uint32) (err error) + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + if flags & ^(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EACCESS) != 0 { + return EOPNOTSUPP + } + return faccessat(dirfd, path, mode) +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index bb8e4fbd86..74bc098ce1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -10,7 +10,6 @@ package unix import ( - "syscall" "unsafe" ) @@ -51,6 +50,8 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 @@ -78,12 +79,12 @@ func Pipe2(p []int, flags int) (err error) { //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 //sysnb setgroups(n int, list *_Gid_t) (err error) = SYS_SETGROUPS32 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Pause() (err error) func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { @@ -157,10 +158,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) { return setrlimit(resource, &rl) } -// Underlying system call writes to newoffset via pointer. -// Implemented in assembly to avoid allocation. -func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { @@ -169,11 +166,11 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return newoffset, nil } -// Vsyscalls on amd64. 
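The new Faccessat wrapper above validates the flags itself because the underlying faccessat(2) syscall takes none: unknown bits yield EINVAL and, at this revision, AT_SYMLINK_NOFOLLOW or AT_EACCESS yield EOPNOTSUPP rather than being silently dropped. A short sketch (the path is illustrative):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Plain access check: no flags, so this maps straight onto the raw
	// faccessat(2) syscall.
	if err := unix.Faccessat(unix.AT_FDCWD, "/etc/hostname", unix.R_OK, 0); err != nil {
		log.Println("not readable:", err)
	}

	// Per the wrapper above, flag bits the kernel syscall cannot honour are
	// reported explicitly instead of being ignored.
	err := unix.Faccessat(unix.AT_FDCWD, "/etc/hostname", unix.R_OK, unix.AT_EACCESS)
	log.Println("with AT_EACCESS:", err) // EOPNOTSUPP at this revision
}
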
+//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) - //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) // On x86 Linux, all the socket calls go through an extra indirection, // I think because the 5-register system call interface can't handle @@ -206,9 +203,6 @@ const ( _SENDMMSG = 20 ) -func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) -func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) - func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index d121106323..5f9b2454ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -7,6 +7,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -57,6 +58,7 @@ func Stat(path string, stat *Stat_t) (err error) { //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -75,6 +77,8 @@ func Stat(path string, stat *Stat_t) (err error) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) + func Gettimeofday(tv *Timeval) (err error) { errno := gettimeofday(tv) if errno != 0 { @@ -96,6 +100,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index c59f8588f1..3ec7a9329d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -75,6 +75,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -86,6 +88,7 @@ func Seek(fd int, offset 
int64, whence int) (newoffset int64, err error) { //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 +//sys Pause() (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 @@ -97,11 +100,10 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) -// Vsyscalls on amd64. +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) -//sys Pause() (err error) func Time(t *Time_t) (Time_t, error) { var tv Timeval @@ -123,6 +125,8 @@ func Utime(path string, buf *Utimbuf) error { return Utimes(path, tv) } +//sys utimes(path string, times *[2]Timeval) (err error) + //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index a1e8a609b2..646f295ade 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -6,6 +6,15 @@ package unix +import "unsafe" + +func EpollCreate(size int) (fd int, err error) { + if size <= 0 { + return -1, EINVAL + } + return EpollCreate1(0) +} + //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -57,6 +66,11 @@ func Lstat(path string, stat *Stat_t) (err error) { //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -85,6 +99,18 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: usec} } +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + func Time(t *Time_t) (Time_t, error) { var tv Timeval err := Gettimeofday(&tv) @@ -105,6 +131,18 @@ func Utime(path string, buf *Utimbuf) error { return Utimes(path, tv) } +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + 
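On arm64 the legacy epoll_create syscall does not exist, so the EpollCreate wrapper above validates the historical size argument and delegates to EpollCreate1; portable code can simply call EpollCreate1 directly on every Linux architecture. A small sketch watching stdin (the timeout value is arbitrary):

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// EpollCreate1 works on every Linux arch, including arm64 where the
	// legacy epoll_create syscall (and its size hint) is unavailable.
	epfd, err := unix.EpollCreate1(0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(epfd)

	// Watch stdin for readability.
	ev := unix.EpollEvent{Events: unix.EPOLLIN, Fd: int32(os.Stdin.Fd())}
	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, int(os.Stdin.Fd()), &ev); err != nil {
		log.Fatal(err)
	}

	events := make([]unix.EpollEvent, 8)
	n, err := unix.EpollWait(epfd, events, 1000) // 1s timeout
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d fd(s) ready", n)
}
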
NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL @@ -161,22 +199,6 @@ func Pause() (err error) { return } -// TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove -// these when the deprecated syscalls that the syscall package relies on -// are removed. -const ( - SYS_GETPGRP = 1060 - SYS_UTIMES = 1037 - SYS_FUTIMESAT = 1066 - SYS_PAUSE = 1061 - SYS_USTAT = 1070 - SYS_UTIME = 1063 - SYS_LCHOWN = 1032 - SYS_TIME = 1062 - SYS_EPOLL_CREATE = 1042 - SYS_EPOLL_WAIT = 1069 -) - func Poll(fds []PollFd, timeout int) (n int, err error) { var ts *Timespec if timeout >= 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go new file mode 100644 index 0000000000..070bd38994 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!gccgo,386 + +package unix + +import "syscall" + +// Underlying system call writes to newoffset via pointer. +// Implemented in assembly to avoid allocation. +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) +func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err syscall.Errno) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go new file mode 100644 index 0000000000..308eb7aecf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,gccgo,386 + +package unix + +import ( + "syscall" + "unsafe" +) + +func seek(fd int, offset int64, whence int) (int64, syscall.Errno) { + var newoffset int64 + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} + +func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) { + fd, _, err := Syscall(SYS_SOCKETCALL, uintptr(call), uintptr(unsafe.Pointer(&a0)), 0) + return int(fd), err +} + +func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno) { + fd, _, err := RawSyscall(SYS_SOCKETCALL, uintptr(call), uintptr(unsafe.Pointer(&a0)), 0) + return int(fd), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go similarity index 53% rename from vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go rename to vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index df9c123718..aa7fc9e199 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -2,9 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux -// +build gccgo -// +build 386 arm +// +build linux,gccgo,arm package unix @@ -13,9 +11,10 @@ import ( "unsafe" ) -func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) { +func seek(fd int, offset int64, whence int) (int64, syscall.Errno) { + var newoffset int64 offsetLow := uint32(offset & 0xffffffff) offsetHigh := uint32((offset >> 32) & 0xffffffff) - _, _, err = Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + _, _, err := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) return newoffset, err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 090ed404ab..ad991031c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -8,6 +8,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -47,6 +48,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -65,6 +67,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -80,6 +83,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 9e16cc9d14..0e05924820 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,6 +15,8 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 @@ -33,13 +35,12 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid 
int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) - //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) - //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -61,16 +62,17 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) +//sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 -//sys Utime(path string, buf *Utimbuf) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Pause() (err error) func Fstatfs(fd int, buf *Statfs_t) (err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 6fb8733d67..8c6720f7f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,8 +7,9 @@ package unix -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -45,6 +46,7 @@ package unix //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2 //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) @@ -63,10 +65,11 @@ package unix //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Time(t *Time_t) (tt Time_t, err error) - //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index c0d86e722b..6e4ee0cf2a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -11,6 +11,7 @@ import ( ) //sys Dup2(oldfd int, newfd int) (err error) +//sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) @@ -44,9 +45,11 @@ import ( //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) +//sys Ustat(dev int, ubuf *Ustat_t) (err error) //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) //sysnb setgroups(n int, list *_Gid_t) (err error) +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -62,6 +65,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 78c1e0df1d..72e64187de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -68,6 +68,7 @@ func Iopl(level int) (err error) { return ENOSYS } +//sys futimesat(dirfd int, path string, times *[2]Timeval) (err error) //sysnb Gettimeofday(tv *Timeval) (err error) func Time(t *Time_t) (tt Time_t, err error) { @@ -83,6 +84,7 @@ func Time(t *Time_t) (tt Time_t, err error) { } //sys Utime(path string, buf *Utimbuf) (err error) +//sys utimes(path string, times *[2]Timeval) (err error) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index e1a3baa237..369a2be2dc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -233,6 +233,7 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 614fcf0494..9fc9c06a08 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -201,6 +201,7 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 
b7629529b3..a05337d540 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -112,7 +112,7 @@ func Getsockname(fd int) (sa Sockaddr, err error) { if err = getsockname(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } // GetsockoptString returns the string value of the socket option opt for the @@ -314,7 +314,11 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { // FcntlInt performs a fcntl syscall on fd with the provided command and argument. func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + valptr, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + var err error + if errno != 0 { + err = errno + } return int(valptr), err } @@ -356,7 +360,7 @@ func Futimes(fd int, tv []Timeval) error { return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { +func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) @@ -407,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if nfd == -1 { return } - sa, err = anyToSockaddr(&rsa) + sa, err = anyToSockaddr(fd, &rsa) if err != nil { Close(nfd) nfd = 0 @@ -444,7 +448,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from oobn = int(msg.Accrightslen) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } @@ -595,6 +599,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Dup(fd int) (nfd int, err error) //sys Dup2(oldfd int, newfd int) (err error) //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index b835bad0fe..95b2180aeb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -219,7 +219,7 @@ func Getpeername(fd int) (sa Sockaddr, err error) { if err = getpeername(fd, &rsa, &len); err != nil { return } - return anyToSockaddr(&rsa) + return anyToSockaddr(fd, &rsa) } func GetsockoptByte(fd, level, opt int) (value byte, err error) { @@ -291,7 +291,7 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } if rsa.Addr.Family != AF_UNSPEC { - from, err = anyToSockaddr(&rsa) + from, err = anyToSockaddr(fd, &rsa) } return } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index ee17d4bd4a..2f0091bbc9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -3,7 +3,7 @@ // +build 386,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
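The syscall_solaris.go hunk above changes `FcntlInt` so the raw errno returned by `sysvicall6` is converted to an `error` only when it is non-zero. This matters because storing a zero-valued `syscall.Errno` in the `error` interface produces a non-nil error even though the call succeeded. A self-contained sketch of the corrected pattern (the helper and values are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"syscall"
)

// toErr mirrors the corrected FcntlInt handling: convert an Errno into an
// error only when it is non-zero, so successful calls return a nil error.
func toErr(errno syscall.Errno) error {
	var err error
	if errno != 0 {
		err = errno
	}
	return err
}

func main() {
	fmt.Println(toErr(0) == nil)      // true: no spurious error on success
	fmt.Println(toErr(syscall.EBADF)) // "bad file descriptor"
}
```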
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1234,6 +1235,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1289,6 +1291,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1334,11 +1337,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1466,6 +1476,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 64ab9f40ad..a80c7ae5fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -3,7 +3,7 @@ // +build amd64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
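Among the constants added per architecture are the prctl speculation-control values (`PR_GET_SPECULATION_CTRL`, `PR_SET_SPECULATION_CTRL`, `PR_SPEC_*`) introduced for the Speculative Store Bypass mitigation. A hedged sketch of querying the current state on Linux; the raw syscall is used because the state is reported in the return value, which `unix.Prctl` does not expose, and the bit interpretation follows prctl(2):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	ret, _, errno := unix.Syscall6(unix.SYS_PRCTL,
		unix.PR_GET_SPECULATION_CTRL, unix.PR_SPEC_STORE_BYPASS, 0, 0, 0, 0)
	if errno != 0 {
		fmt.Println("prctl:", errno)
		return
	}
	switch {
	case ret&unix.PR_SPEC_FORCE_DISABLE != 0:
		fmt.Println("store bypass: force-disabled")
	case ret&unix.PR_SPEC_DISABLE != 0:
		fmt.Println("store bypass: disabled")
	case ret&unix.PR_SPEC_ENABLE != 0:
		fmt.Println("store bypass: enabled")
	default:
		fmt.Println("store bypass: not affected or not controllable")
	}
}
```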
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1234,6 +1235,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1289,6 +1291,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1334,11 +1337,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1467,6 +1477,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 6ae0ac6f86..49a9b0133b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -3,7 +3,7 @@ // +build arm,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1232,6 +1233,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1287,6 +1289,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1332,11 +1335,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1473,6 +1483,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index f58450bf3d..8d70233b26 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -3,7 +3,7 @@ // +build arm64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -450,6 +450,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1235,6 +1236,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1292,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1338,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1457,6 +1467,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 465ff2f0f3..410ab56b09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -3,7 +3,7 @@ // +build mips,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1233,6 +1234,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1288,6 +1290,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1336,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1467,6 +1477,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 37e851aabe..dac4d90fad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -3,7 +3,7 @@ // +build mips64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1233,6 +1234,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1288,6 +1290,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1336,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1467,6 +1477,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 1131d3ca16..1d2f0e6382 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -3,7 +3,7 @@ // +build mips64le,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1233,6 +1234,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1288,6 +1290,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1336,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1467,6 +1477,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index d04a43b2bc..33b9940234 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -3,7 +3,7 @@ // +build mipsle,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1233,6 +1234,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc004240a @@ -1288,6 +1290,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1333,11 +1336,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1467,6 +1477,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 710410efdd..c78d669761 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -3,7 +3,7 @@ // +build ppc64,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1234,6 +1235,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1292,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1338,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1523,6 +1533,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index c1c1c01bcf..63493756d8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -3,7 +3,7 @@ // +build ppc64le,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1234,6 +1235,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x20002401 PERF_EVENT_IOC_ENABLE = 0x20002400 PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1290,6 +1292,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1335,11 +1338,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1523,6 +1533,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c7583e15eb..3814df8549 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -3,7 +3,7 @@ // +build s390x,linux -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix @@ -449,6 +449,7 @@ const ( ETH_P_PPP_DISC = 0x8863 ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -1232,6 +1233,7 @@ const ( PERF_EVENT_IOC_DISABLE = 0x2401 PERF_EVENT_IOC_ENABLE = 0x2400 PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 PERF_EVENT_IOC_QUERY_BPF = 0xc008240a @@ -1287,6 +1289,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 PR_GET_THP_DISABLE = 0x2a PR_GET_TID_ADDRESS = 0x28 PR_GET_TIMERSLACK = 0x1e @@ -1332,11 +1335,18 @@ const ( PR_SET_PTRACER_ANY = 0xffffffffffffffff PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 PR_SET_THP_DISABLE = 0x29 PR_SET_TIMERSLACK = 0x1d PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 PR_SVE_GET_VL = 0x33 PR_SVE_SET_VL = 0x32 PR_SVE_SET_VL_ONEXEC = 0x40000 @@ -1527,6 +1537,33 @@ const ( RTCF_MASQ = 0x400000 RTCF_NAT = 0x800000 RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQF = 0x80 + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UF = 0x10 + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f RTF_ADDRCLASSMASK = 0xf8000000 RTF_ADDRCONF = 0x40000 RTF_ALLONLINK = 0x20000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 95de199fc4..7fdc85b172 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,5 +1,5 @@ // mkerrors.sh -m64 -// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
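The `RTC_*` values added across these zerrors files are the ioctl request numbers of the real-time-clock driver (linux/rtc.h); they differ per architecture because the ioctl direction and size bits are encoded differently. A hedged sketch of reading the hardware clock with the new `RTC_RD_TIME` request; the `rtcTime` struct is declared locally for illustration only, since this vendored snapshot does not export an equivalent type:

```go
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

// rtcTime mirrors the kernel's struct rtc_time (linux/rtc.h); it is defined
// here only for the sketch and is not part of the vendored package.
type rtcTime struct {
	Sec, Min, Hour, Mday, Mon, Year, Wday, Yday, Isdst int32
}

func main() {
	fd, err := unix.Open("/dev/rtc0", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	var t rtcTime
	_, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd),
		unix.RTC_RD_TIME, uintptr(unsafe.Pointer(&t)))
	if errno != 0 {
		fmt.Println("ioctl:", errno)
		return
	}
	// struct rtc_time counts years from 1900 and months from 0.
	fmt.Printf("RTC time: %04d-%02d-%02d %02d:%02d:%02d UTC\n",
		t.Year+1900, t.Mon+1, t.Mday, t.Hour, t.Min, t.Sec)
}
```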
// +build sparc64,linux diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 433becfd02..237e960959 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1565,6 +1519,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64_64, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice)) if e1 != 0 { @@ -1869,6 +1851,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(n int, list *_Gid_t) (nn int, err error) { r0, _, e1 := RawSyscall(SYS_GETGROUPS32, uintptr(n), uintptr(unsafe.Pointer(list)), 0) nn = int(r0) @@ -1912,15 +1904,8 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1929,8 +1914,8 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func getrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1939,8 +1924,8 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func setrlimit(resource int, rlim *rlimit32) (err error) { + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1949,8 +1934,13 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) if e1 != 0 { err = errnoErr(e1) } @@ -1995,6 +1985,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 33c02b2695..9b40f580b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1499,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1937,6 +1902,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2127,6 +2102,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2142,6 +2132,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + 
} + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index f91b56c217..d93cc2f3b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, 
mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -1734,6 +1688,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN32, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1852,6 +1834,16 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1970,8 +1962,8 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1980,15 +1972,13 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) if e1 != 0 { err = errnoErr(e1) } @@ -1997,8 +1987,23 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, 
uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 52d7595250..5f7d0213b2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err 
error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 970a5c132d..e761705282 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err 
error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1499,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1794,6 +1776,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2004,6 +1996,21 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2025,13 +2032,13 @@ func Time(t *Time_t) (tt Time_t, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { 
return } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2040,8 +2047,13 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2050,13 +2062,13 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2065,13 +2077,8 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2080,13 +2087,13 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2095,15 +2102,13 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index b989d0f282..382e072f5e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) 
if e1 != 0 { @@ -1545,6 +1499,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1881,6 +1846,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2071,6 +2046,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2096,6 +2086,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1f8d14cacc..da33eb12b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1499,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1881,6 +1846,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2071,6 +2046,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2096,6 +2086,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index a9c7e520e4..f6e45189ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := 
RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1499,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) if e1 != 0 { @@ -1794,6 +1776,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ 
-2004,6 +1996,21 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2025,13 +2032,13 @@ func Time(t *Time_t) (tt Time_t, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func Utime(path string, buf *Utimbuf) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2040,8 +2047,13 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2050,13 +2062,13 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { +func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2065,13 +2077,8 @@ func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -2080,13 +2087,13 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Utime(path string, buf *Utimbuf) (err error) { +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2095,15 +2102,13 @@ func 
Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 3bb9a20992..7cc1bfd129 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev 
int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,15 +1474,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1562,6 +1499,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1963,6 +1928,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2153,6 +2128,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2189,6 +2179,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, 
times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 56116623d6..c3dcb38186 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func 
write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,15 +1474,13 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } @@ -1562,6 +1499,34 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1963,6 +1928,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -2153,6 +2128,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -2189,6 +2179,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 9696a0199d..38f903cd3e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -143,21 +143,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -173,16 +158,6 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -494,17 +469,6 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollCreate1(flag int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) @@ -544,21 +508,6 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { @@ -1366,16 +1315,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1535,6 +1474,21 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func faccessat(dirfd int, path string, mode 
uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1545,6 +1499,17 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollCreate(size int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1923,6 +1888,16 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Ustat(dev int, ubuf *Ustat_t) (err error) { + _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(n int, list *_Gid_t) (nn int, err error) { r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) nn = int(r0) @@ -1944,6 +1919,21 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -1969,6 +1959,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index c01b3b6bab..b26aee9579 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -14,6 +14,31 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(oldpath) @@ -118,13 +143,13 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimes(path string, times *[2]Timeval) (err error) { +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -133,13 +158,70 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(arg2) if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -148,8 +230,19 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { - _, _, e1 := 
Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times))) +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -158,15 +251,14 @@ func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) if e1 != 0 { err = errnoErr(e1) } @@ -175,9 +267,15 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -251,6 +349,33 @@ func Acct(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Adjtimex(buf *Timex) (state int, err error) { r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) state = int(r0) @@ -344,8 +469,8 @@ func Dup3(oldfd int, newfd int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate(size int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0) +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -355,9 +480,8 @@ func EpollCreate(size int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCreate1(flag int) (fd 
int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -366,8 +490,9 @@ func EpollCreate1(flag int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -377,7 +502,7 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) return } @@ -428,21 +553,6 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -528,7 +638,7 @@ func Getpgid(pid int) (pgid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) pid = int(r0) return } @@ -536,7 +646,7 @@ func Getpid() (pid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } @@ -593,7 +703,7 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettid() (tid int) { - r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) tid = int(r0) return } @@ -692,6 +802,33 @@ func Klogctl(typ int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listxattr(path string, dest []byte) (sz int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -714,6 +851,74 @@ func Listxattr(path string, 
dest []byte) (sz int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdirat(dirfd int, path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -754,6 +959,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -794,6 +1010,17 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -851,6 +1078,32 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 
*byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setdomainname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -960,8 +1213,33 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { - Syscall(SYS_SYNC, 0, 0, 0) + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } return } @@ -1010,7 +1288,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0) + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) oldmask = int(r0) return } @@ -1052,16 +1330,6 @@ func Unshare(flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Ustat(dev int, ubuf *Ustat_t) (err error) { - _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1169,14 +1437,24 @@ func Mlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Munlock(b []byte) (err error) { +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } @@ -1185,8 +1463,14 @@ func Munlock(b []byte) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1262,6 +1546,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, buf *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) if e1 != 0 { @@ -1283,7 +1582,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } @@ -1291,7 +1590,7 @@ func Getegid() (egid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (euid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) euid = int(r0) return } @@ -1299,7 +1598,7 @@ func Geteuid() (euid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) gid = int(r0) return } @@ -1317,7 +1616,7 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) uid = int(r0) return } @@ -1788,6 +2087,21 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { @@ -1813,6 +2127,21 @@ func Utime(path string, buf *Utimbuf) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index fb4b962782..b3298930e7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -571,6 +571,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index beac82ef86..5096ef07e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -571,6 +571,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 7bd5f60b00..b7141c63f9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -571,6 +571,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 49b3b5e8a4..1942049b0f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c4c7d8540c..d351c72cb7 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 210285b0ba..617d47f0f3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -569,6 +569,21 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 397896300d..e2e5fc5e0a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -41,6 +41,7 @@ import ( //go:cgo_import_dynamic libc_dup dup "libc.so" //go:cgo_import_dynamic libc_dup2 dup2 "libc.so" //go:cgo_import_dynamic libc_exit exit "libc.so" +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" //go:cgo_import_dynamic libc_fchdir fchdir "libc.so" //go:cgo_import_dynamic libc_fchmod fchmod "libc.so" //go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" @@ -169,6 +170,7 @@ import ( //go:linkname procDup libc_dup //go:linkname procDup2 libc_dup2 //go:linkname procExit libc_exit +//go:linkname procFaccessat libc_faccessat //go:linkname procFchdir libc_fchdir //go:linkname procFchmod libc_fchmod //go:linkname procFchmodat libc_fchmodat @@ -298,6 +300,7 @@ var ( procDup, procDup2, procExit, + procFaccessat, procFchdir, procFchmod, procFchmodat, @@ -695,6 +698,19 @@ func Exit(code int) { return } +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 83bb935b91..b005031abe 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// 
MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 207b27938b..90c95c2c75 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build amd64,openbsd diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 83bb935b91..b005031abe 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -1,5 +1,5 @@ // mksysctl_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index ed92409d97..c061d6f1d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -331,4 +331,5 @@ const ( SYS_S390_GUARDED_STORAGE = 378 SYS_STATX = 379 SYS_S390_STHYI = 380 + SYS_KEXEC_FILE_LOAD = 381 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index c9c129dc42..2d09936721 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,5 +1,5 @@ // mksysnum_linux.pl -Ilinux/usr/include -m64 -D__arch64__ linux/usr/include/asm/unistd.h -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build sparc64,linux diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 8afda9c451..f0daa05a9c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build 386,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index aea8dbec43..ddb25b94f3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build amd64,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index c6158a7ef9..315bd63f89 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_netbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
// +build arm,netbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 3e8ce2a1dd..07787301f0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build 386,openbsd diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 32653e53c7..7a1693acbc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. // +build arm,openbsd diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 4047462081..4c250033fa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -248,6 +248,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -401,6 +408,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -705,6 +713,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1813,3 +1823,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 2ab0cb9e79..2e4d709b4f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -405,6 +412,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -723,6 +731,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1833,3 +1843,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 18d577b912..bf38e5e2cd 100644 
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -404,6 +411,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -693,6 +701,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1802,3 +1812,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6ea80563f1..972c1b8723 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -702,6 +710,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1812,3 +1822,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index e0a3932232..783e70e87c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -402,6 +409,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -697,6 +705,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1807,3 +1817,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + 
Posmult int32 + Negmult int32 + Clock int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 838135e24b..5c6ea719da 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -704,6 +712,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1814,3 +1824,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index a9d0131f2e..93effc8eca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -251,6 +251,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -406,6 +413,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -704,6 +712,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1814,3 +1824,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4f6f145556..cc5ca242eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -249,6 +249,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -402,6 +409,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -697,6 +705,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1807,3 +1817,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + 
Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 9ddd47015e..712f640295 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -407,6 +414,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -712,6 +720,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1822,3 +1832,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3a5cc696ec..1be45320ab 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -252,6 +252,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -407,6 +414,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -712,6 +720,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1822,3 +1832,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 032a71bbfe..932b655fed 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -250,6 +250,13 @@ type RawSockaddrL2 struct { _ [1]byte } +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + type RawSockaddrCAN struct { Family uint16 _ [2]byte @@ -405,6 +412,7 @@ const ( SizeofSockaddrNetlink = 0xc SizeofSockaddrHCI = 0x6 SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa SizeofSockaddrCAN = 0x10 SizeofSockaddrALG = 0x58 SizeofSockaddrVM = 0x10 @@ -728,6 +736,8 @@ const ( AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 
+ + AT_EACCESS = 0x200 ) type PollFd struct { @@ -1839,3 +1849,32 @@ const ( NFT_NG_INCREMENTAL = 0x0 NFT_NG_RANDOM = 0x1 ) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + _ [2]byte + Time RTCTime +} + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index f1ec5dc4ee..4f17a3331f 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -296,6 +296,7 @@ const ( TOKEN_ADJUST_PRIVILEGES TOKEN_ADJUST_GROUPS TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_SESSIONID TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY | @@ -305,7 +306,8 @@ const ( TOKEN_QUERY_SOURCE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_DEFAULT | + TOKEN_ADJUST_SESSIONID TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY TOKEN_WRITE = STANDARD_RIGHTS_WRITE | TOKEN_ADJUST_PRIVILEGES | diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 24aa90bbbe..62fc31b40b 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -43,6 +43,11 @@ const ( SC_STATUS_PROCESS_INFO = 0 + SC_ACTION_NONE = 0 + SC_ACTION_RESTART = 1 + SC_ACTION_REBOOT = 2 + SC_ACTION_RUN_COMMAND = 3 + SERVICE_STOPPED = 1 SERVICE_START_PENDING = 2 SERVICE_STOP_PENDING = 3 @@ -148,6 +153,19 @@ type ENUM_SERVICE_STATUS_PROCESS struct { ServiceStatusProcess SERVICE_STATUS_PROCESS } +type SERVICE_FAILURE_ACTIONS struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions *SC_ACTION +} + +type SC_ACTION struct { + Type uint32 + Delay uint32 +} + //sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle //sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW //sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 52c2037b68..7d2a6794db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -94,16 +94,29 @@ const ( FILE_APPEND_DATA = 0x00000004 FILE_WRITE_ATTRIBUTES = 0x00000100 - FILE_SHARE_READ = 0x00000001 - FILE_SHARE_WRITE = 0x00000002 - FILE_SHARE_DELETE = 0x00000004 - FILE_ATTRIBUTE_READONLY = 0x00000001 - FILE_ATTRIBUTE_HIDDEN = 0x00000002 - FILE_ATTRIBUTE_SYSTEM = 0x00000004 - FILE_ATTRIBUTE_DIRECTORY = 0x00000010 - FILE_ATTRIBUTE_ARCHIVE = 0x00000020 - FILE_ATTRIBUTE_NORMAL = 0x00000080 - FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + 
FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 INVALID_FILE_ATTRIBUTES = 0xffffffff @@ -312,6 +325,14 @@ var ( OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") ) +// Pointer represents a pointer to an arbitrary Windows type. +// +// Pointer-typed fields may point to one of many different types. It's +// up to the caller to provide a pointer to the appropriate type, cast +// to Pointer. The caller must obey the unsafe.Pointer rules while +// doing so. +type Pointer *struct{} + // Invented values to support what package os expects. type Timeval struct { Sec int32 @@ -880,11 +901,15 @@ type MibIfRow struct { Descr [MAXLEN_IFDESCR]byte } +type CertInfo struct { + // Not implemented +} + type CertContext struct { EncodingType uint32 EncodedCert *byte Length uint32 - CertInfo uintptr + CertInfo *CertInfo Store Handle } @@ -899,12 +924,16 @@ type CertChainContext struct { RevocationFreshnessTime uint32 } +type CertTrustListInfo struct { + // Not implemented +} + type CertSimpleChain struct { Size uint32 TrustStatus CertTrustStatus NumElements uint32 Elements **CertChainElement - TrustListInfo uintptr + TrustListInfo *CertTrustListInfo HasRevocationFreshnessTime uint32 RevocationFreshnessTime uint32 } @@ -919,14 +948,18 @@ type CertChainElement struct { ExtendedErrorInfo *uint16 } +type CertRevocationCrlInfo struct { + // Not implemented +} + type CertRevocationInfo struct { Size uint32 RevocationResult uint32 RevocationOid *byte - OidSpecificInfo uintptr + OidSpecificInfo Pointer HasFreshnessTime uint32 FreshnessTime uint32 - CrlInfo uintptr // *CertRevocationCrlInfo + CrlInfo *CertRevocationCrlInfo } type CertTrustStatus struct { @@ -957,7 +990,7 @@ type CertChainPara struct { type CertChainPolicyPara struct { Size uint32 Flags uint32 - ExtraPolicyPara uintptr + ExtraPolicyPara Pointer } type SSLExtraCertChainPolicyPara struct { @@ -972,7 +1005,7 @@ type CertChainPolicyStatus struct { Error uint32 ChainIndex uint32 ElementIndex uint32 - ExtraPolicyStatus uintptr + ExtraPolicyStatus Pointer } const ( diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 318c61634e..fc56aec035 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -1,4 +1,4 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT +// Code generated by 'go generate'; DO NOT EDIT. package windows diff --git a/vendor/golang.org/x/text/README.md b/vendor/golang.org/x/text/README.md index b3f365eed4..c6d2c257a2 100644 --- a/vendor/golang.org/x/text/README.md +++ b/vendor/golang.org/x/text/README.md @@ -89,5 +89,5 @@ This repository uses Gerrit for code changes. To learn how to submit changes to this repository, see https://golang.org/doc/contribute.html. The main issue tracker for the image repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/image:" in the +https://github.com/golang/go/issues. 
Prefix your issue with "x/text:" in the subject line, so it is easy to find. diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md index 8d079162f7..5f01fa1802 100644 --- a/vendor/k8s.io/client-go/README.md +++ b/vendor/k8s.io/client-go/README.md @@ -2,7 +2,7 @@ Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster. -We currently recommend using the v7.0.0 tag. See [INSTALL.md](/INSTALL.md) for +We currently recommend using the v8.0.0 tag. See [INSTALL.md](/INSTALL.md) for detailed installation instructions. `go get k8s.io/client-go/...` works, but will build `master`, which doesn't handle the dependencies well. @@ -91,17 +91,17 @@ We will backport bugfixes--but not new features--into older versions of #### Compatibility matrix -| | Kubernetes 1.4 | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | -|---------------------|----------------|----------------|----------------|----------------|----------------|----------------|-----------------| -| client-go 1.4 | ✓ | - | - | - | - | - | - | -| client-go 1.5 | + | - | - | - | - | - | - | -| client-go 2.0 | +- | ✓ | +- | +- | +- | +- | +- | -| client-go 3.0 | +- | +- | ✓ | - | +- | +- | +- | -| client-go 4.0 | +- | +- | +- | ✓ | +- | +- | +- | -| client-go 5.0 | +- | +- | +- | +- | ✓ | +- | +- | -| client-go 6.0 | +- | +- | +- | +- | +- | ✓ | +- | -| client-go 7.0 | +- | +- | +- | +- | +- | +- | ✓ | -| client-go HEAD | +- | +- | +- | +- | +- | + | + | +| | Kubernetes 1.5 | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | Kubernetes 1.11 | +|---------------------|----------------|----------------|----------------|----------------|----------------|-----------------|-----------------| +| client-go 1.5 | - | - | - | - | - | - | - | +| client-go 2.0 | ✓ | +- | +- | +- | +- | +- | +- | +| client-go 3.0 | +- | ✓ | - | +- | +- | +- | +- | +| client-go 4.0 | +- | +- | ✓ | +- | +- | +- | +- | +| client-go 5.0 | +- | +- | +- | ✓ | +- | +- | +- | +| client-go 6.0 | +- | +- | +- | +- | ✓ | +- | +- | +| client-go 7.0 | +- | +- | +- | +- | +- | ✓ | +- | +| client-go 8.0 | +- | +- | +- | +- | +- | +- | ✓ | +| client-go HEAD | +- | +- | +- | +- | +- | +- | +- | Key: @@ -127,9 +127,10 @@ between client-go versions. | client-go 2.0 | Kubernetes main repo, 1.5 branch | = - | | client-go 3.0 | Kubernetes main repo, 1.6 branch | = - | | client-go 4.0 | Kubernetes main repo, 1.7 branch | = - | -| client-go 5.0 | Kubernetes main repo, 1.8 branch | ✓ | +| client-go 5.0 | Kubernetes main repo, 1.8 branch | = - | | client-go 6.0 | Kubernetes main repo, 1.9 branch | ✓ | | client-go 7.0 | Kubernetes main repo, 1.10 branch | ✓ | +| client-go 8.0 | Kubernetes main repo, 1.11 branch | ✓ | | client-go HEAD | Kubernetes main repo, master branch | ✓ | Key: @@ -154,7 +155,7 @@ existing users won't be broken. ### Kubernetes tags -As of April 2018, client-go is still a mirror of +This repository is still a mirror of [k8s.io/kubernetes/staging/src/client-go](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/client-go), the code development is still done in the staging area. Since Kubernetes 1.8 release, when syncing the code from the staging area, we also sync the Kubernetes