diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9fdc20f --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..73c6939 --- /dev/null +++ b/README.md @@ -0,0 +1,75 @@ +## oci-store + +This is an oci store implementation + +It's made of a base store on top of which additional stores can be built (perhaps also an appc store). The oci store adds OCI specific functions (primarily blobs GC) to the base store. + +The base store is a CAS store with additional features required to handle multiple requirements (access from multiple processes, access from multiple goroutines using the same store instance, indexing, additional blob data, key/value data): + +* The store data can be concurrently accessed by multiple processes and multiple goroutines (like CoreOS rkt) (locally since it's not meant to be used as a multi host shared store) +* Store data (blobs) is indexed to be easily listed and searched (for example to resolve partial digests) +* Every blob can be augmented with additional data (useful to save additional information (like last accessed time etc...) 
+* Additional key/value data (they can optionally reference a blob) can be added (useful for oci references to descriptors and for the appc spec remote caching data) + + +The oci store uses this store and implements OCI specific functions: + +* Garbage collection of unreferenced manifests, configs, layers +* A check for verifying that all the blobs needed by a name reference are available in the store (TODO) + +Its base comes from the experience in the CoreOS rkt store and uses a big part of it. + +NOTE: This is a work in progress and before an initial release I'll squash and force push to avoid multiple useless commits. + +### Test client + +A simple test client has been created to test different features. + +It can import and export a directory containing an oci image layout and list references + + +#### Example client + +Build the example client + +``` +cd examples/client +cd oci-fetch +go install +``` + +Create an oci image layout dir: + +``` +go get github.com/sgotti/oci-fetch # temporary fixes to be merged in github.com/containers/oci-fetch +oci-fetch docker://registry-1.docker.io/library/nginx:latest nginx.oci +mkdir nginx; cd nginx +tar xvf ../nginx.oci +``` + +Import from oci image layout ref latest with name `nginx` in the oci store +``` +./client import --sref latest --dref nginx:latest nginx/ +``` + +List oci store manifest references + +``` +./client list +``` + +Export image nginx with rev latest to an oci image layout in out/ dir +``` +./client export --sref nginx:latest --dref latest out/ +``` + +Remove image nginx with rev latest from the store +``` +./client rm --ref nginx:latest +``` + +Garbage collect unreferenced blobs +``` +./client gc +``` + diff --git a/common/schema.go b/common/schema.go new file mode 100644 index 0000000..4d1f359 --- /dev/null +++ b/common/schema.go @@ -0,0 +1,26 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +type Descriptor struct { + MediaType string `json:"mediaType"` + Digest string `json:"digest"` + Size int64 `json:"size"` +} + +type Manifest struct { + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` +} diff --git a/examples/client/main.go b/examples/client/main.go new file mode 100644 index 0000000..77f2325 --- /dev/null +++ b/examples/client/main.go @@ -0,0 +1,448 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "text/tabwriter" + + "github.com/sgotti/oci-store/common" + "github.com/sgotti/oci-store/ocistore" + "github.com/spf13/cobra" +) + +var storePath string + +func main() { + cmd := &cobra.Command{ + Use: "test-client", + Short: "A test client for the oci store", + } + + stdout := log.New(os.Stdout, "", 0) + stderr := log.New(os.Stderr, "", 0) + + cmd.AddCommand(newImportCmd(stdout, stderr)) + cmd.AddCommand(newExportCmd(stdout, stderr)) + cmd.AddCommand(newListCmd(stdout, stderr)) + cmd.AddCommand(newRmCmd(stdout, stderr)) + cmd.AddCommand(newGCCmd(stdout, stderr)) + + flags := cmd.PersistentFlags() + + flags.StringVar(&storePath, "store-path", "/tmp/store", `the oci store path.`) + + if err := cmd.Execute(); err != nil { + stderr.Println(err) + os.Exit(1) + } +} + +type importCmd struct { + stdout *log.Logger + stderr *log.Logger + sref string + dref string +} + +func newImportCmd(stdout, stderr *log.Logger) *cobra.Command { + v := &importCmd{ + stdout: stdout, + stderr: stderr, + } + + cmd := 
&cobra.Command{ + Use: "import --sref REF --dref REF dir...", + Short: "import a ref from an image layout directory", + Run: v.Run, + } + + flags := cmd.Flags() + + flags.StringVar(&v.sref, "sref", "", `the source ref.`) + flags.StringVar(&v.dref, "dref", "", `the dest ref.`) + + return cmd +} + +func (v *importCmd) Run(cmd *cobra.Command, args []string) { + store, err := ocistore.NewOCIStore(storePath) + if err != nil { + fmt.Printf("NewOCIStore err: %v", err) + return + } + + if v.sref == "" { + v.stderr.Fatalf("source ref must be provided") + } + if v.dref == "" { + v.stderr.Fatalf("dest ref must be provided") + } + err = imp(store, args[0], v.sref, v.dref) + + if err != nil { + v.stderr.Fatalf("err: %v", err) + } +} + +func pathDigest(digest string) string { + return strings.Replace(digest, ":", "-", 1) +} + +func doFile(path string, f func(io.Reader) error) error { + r, err := os.Open(path) + if err != nil { + return err + } + defer r.Close() + return f(r) + +} + +func imp(s *ocistore.OCIStore, src, sref, dref string) error { + descPath := filepath.Join(src, "refs", sref) + + var desc common.Descriptor + err := doFile(descPath, func(r io.Reader) error { + return json.NewDecoder(r).Decode(&desc) + }) + if err != nil { + return err + } + + fmt.Printf("desc: %v\n", desc) + + manifestPath := filepath.Join(src, "blobs", pathDigest(desc.Digest)) + manifestBlob, err := ioutil.ReadFile(manifestPath) + if err != nil { + return err + } + var m common.Manifest + json.Unmarshal(manifestBlob, &m) + + fmt.Printf("m: %v\n", m) + + manifestDigest, err := s.WriteBlob(bytes.NewReader(manifestBlob), ocistore.MediaTypeManifest) + if err != nil { + return err + } + fmt.Printf("manifestDigest: %s\n", manifestDigest) + + configPath := filepath.Join(src, "blobs", pathDigest(m.Config.Digest)) + configBlob, err := ioutil.ReadFile(configPath) + if err != nil { + return err + } + + fmt.Printf("config: %s\n", configBlob) + configDigest, err := s.WriteBlob(bytes.NewReader(configBlob), 
ocistore.MediaTypeImageSerializationConfig) + if err != nil { + return err + } + fmt.Printf("configDigest: %s\n", configDigest) + + seenLayers := map[string]struct{}{} + for _, l := range m.Layers { + // Duplicate layers may appear in the manifest + if _, ok := seenLayers[l.Digest]; ok { + continue + } + seenLayers[l.Digest] = struct{}{} + + layerPath := filepath.Join(src, "blobs", pathDigest(l.Digest)) + lf, err := os.Open(layerPath) + layerDigest, err := s.WriteBlob(lf, ocistore.MediaTypeImageSerialization) + if err != nil { + lf.Close() + return err + } + lf.Close() + fmt.Printf("layerDigest: %s\n", layerDigest) + + } + + err = s.SetRef(dref, &desc) + if err != nil { + return err + } + return nil +} + +type exportCmd struct { + stdout *log.Logger + stderr *log.Logger + sref string + dref string +} + +func newExportCmd(stdout, stderr *log.Logger) *cobra.Command { + v := &exportCmd{ + stdout: stdout, + stderr: stderr, + } + + cmd := &cobra.Command{ + Use: "export --sref REF --dref REF dir...", + Short: "export a ref to an image layout directory", + Run: v.Run, + } + + flags := cmd.Flags() + + flags.StringVar(&v.sref, "sref", "", `the source ref.`) + flags.StringVar(&v.dref, "dref", "", `the dest ref.`) + + return cmd +} + +func (v *exportCmd) Run(cmd *cobra.Command, args []string) { + exitcode := 0 + + store, err := ocistore.NewOCIStore(storePath) + if err != nil { + fmt.Printf("NewOCIStore err: %v", err) + return + } + + err = exp(store, args[0], v.sref, v.dref) + + if err != nil { + fmt.Printf("err: %v", err) + exitcode = 1 + } + os.Exit(exitcode) +} + +func writeFile(path string, r io.ReadCloser) error { + defer r.Close() + w, err := os.Create(path) + if err != nil { + return err + } + defer w.Close() + _, err = io.Copy(w, r) + return err +} + +func exp(s *ocistore.OCIStore, dest, sref, dref string) error { + if err := os.MkdirAll(filepath.Join(dest, "blobs"), 0755); err != nil { + return err + } + if err := os.MkdirAll(filepath.Join(dest, "refs"), 0755); err 
!= nil { + return err + } + + desc, err := s.GetRef(sref) + if err != nil { + return err + } + + manifestPath := filepath.Join(dest, "blobs", pathDigest(desc.Digest)) + mr, err := s.ReadBlob(desc.Digest) + if err != nil { + return err + } + manifestBlob, err := ioutil.ReadAll(mr) + if err != nil { + mr.Close() + return err + } + mr.Close() + + if err := ioutil.WriteFile(manifestPath, manifestBlob, 0644); err != nil { + return err + } + + var m common.Manifest + if err := json.Unmarshal(manifestBlob, &m); err != nil { + return err + } + configPath := filepath.Join(dest, "blobs", pathDigest(m.Config.Digest)) + cr, err := s.ReadBlob(m.Config.Digest) + if err != nil { + return err + } + configBlob, err := ioutil.ReadAll(cr) + if err != nil { + cr.Close() + return err + } + cr.Close() + if err := ioutil.WriteFile(configPath, configBlob, 0644); err != nil { + return err + } + + for _, l := range m.Layers { + layerPath := filepath.Join(dest, "blobs", pathDigest(l.Digest)) + lr, err := s.ReadBlob(l.Digest) + if err != nil { + return err + } + if err := writeFile(layerPath, lr); err != nil { + return err + } + } + + descriptor := &common.Descriptor{ + MediaType: ocistore.MediaTypeManifest, + Size: int64(len(manifestBlob)), + Digest: desc.Digest, + } + descriptorBlob, err := json.Marshal(&descriptor) + if err != nil { + return err + } + descriptorPath := filepath.Join(dest, "refs", dref) + if err := ioutil.WriteFile(descriptorPath, descriptorBlob, 0644); err != nil { + return err + } + return nil +} + +type listCmd struct { + stdout *log.Logger + stderr *log.Logger +} + +func newListCmd(stdout, stderr *log.Logger) *cobra.Command { + v := &listCmd{ + stdout: stdout, + stderr: stderr, + } + + cmd := &cobra.Command{ + Use: "list", + Short: "list images in the store", + Run: v.Run, + } + + return cmd +} + +func (v *listCmd) Run(cmd *cobra.Command, args []string) { + store, err := ocistore.NewOCIStore(storePath) + if err != nil { + fmt.Printf("NewOCIStore err: %v", err) + return 
+ } + + err = list(store) + + if err != nil { + v.stderr.Fatalf("err: %v", err) + } +} + +func list(s *ocistore.OCIStore) error { + refs, err := s.ListAllRefs() + if err != nil { + return err + } + + w := new(tabwriter.Writer) + + descs := []*common.Descriptor{} + for _, ref := range refs { + desc, err := s.GetRef(ref) + if err != nil { + return err + } + descs = append(descs, desc) + } + // Format in tab-separated columns with a tab stop of 8. + w.Init(os.Stdout, 0, 8, 0, '\t', 0) + fmt.Fprint(w, "REF\tMANIFEST DIGEST\n") + for i, ref := range refs { + fmt.Fprintf(w, "%s\t%s\n", ref, descs[i].Digest) + } + w.Flush() + + return nil +} + +type rmCmd struct { + stdout *log.Logger + stderr *log.Logger + ref string +} + +func newRmCmd(stdout, stderr *log.Logger) *cobra.Command { + v := &rmCmd{ + stdout: stdout, + stderr: stderr, + } + + cmd := &cobra.Command{ + Use: "rm", + Short: "rm images in the store", + Run: v.Run, + } + + flags := cmd.Flags() + + flags.StringVar(&v.ref, "ref", "", `the ref.`) + + return cmd +} + +func (v *rmCmd) Run(cmd *cobra.Command, args []string) { + store, err := ocistore.NewOCIStore(storePath) + if err != nil { + fmt.Printf("NewOCIStore err: %v", err) + return + } + + err = rm(store, v.ref) + + if err != nil { + v.stderr.Fatalf("err: %v", err) + } +} + +func rm(s *ocistore.OCIStore, ref string) error { + return s.DeleteRef(ref) +} + +type gcCmd struct { + stdout *log.Logger + stderr *log.Logger +} + +func newGCCmd(stdout, stderr *log.Logger) *cobra.Command { + v := &gcCmd{ + stdout: stdout, + stderr: stderr, + } + + cmd := &cobra.Command{ + Use: "gc", + Short: "gc images in the store", + Run: v.Run, + } + + return cmd +} + +func (v *gcCmd) Run(cmd *cobra.Command, args []string) { + store, err := ocistore.NewOCIStore(storePath) + if err != nil { + fmt.Printf("NewOCIStore err: %v", err) + return + } + + err = gc(store) + + if err != nil { + v.stderr.Fatalf("err: %v", err) + } +} + +func gc(s *ocistore.OCIStore) error { + return s.GC() +} diff 
--git a/ocistore/store.go b/ocistore/store.go new file mode 100644 index 0000000..0b2353d --- /dev/null +++ b/ocistore/store.go @@ -0,0 +1,353 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocistore + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/coreos/rkt/pkg/lock" + "github.com/sgotti/oci-store/common" + "github.com/sgotti/oci-store/store" +) + +const ( + defaultPathPerm = os.FileMode(0770 | os.ModeSetgid) + defaultFilePerm = os.FileMode(0660) + + MediaTypeDescriptor = `application/vnd.oci.descriptor.v1+json` + MediaTypeManifest = `application/vnd.oci.image.manifest.v1+json` + MediaTypeManifestList = `application/vnd.oci.image.manifest.list.v1+json` + MediaTypeImageSerialization = `application/vnd.oci.image.serialization.rootfs.tar.gzip` + MediaTypeImageSerializationConfig = `application/vnd.oci.image.serialization.config.v1+json` +) + +// OCIStore is a store for oci image spec +// (https://github.com/opencontainers/image-spec/) +// It can handle blobs and refs to descriptors +type OCIStore struct { + *store.Store + dir string + gcLockDir string +} + +func NewOCIStore(storeDir string) (*OCIStore, error) { + store, err := store.NewStore(storeDir) + if err != nil { + return nil, err + } + + s := &OCIStore{ + Store: store, + dir: storeDir, + } + + s.gcLockDir = filepath.Join(storeDir, "gclock") + if err := os.MkdirAll(s.gcLockDir, defaultPathPerm); err 
!= nil { + return nil, err + } + return s, nil +} + +func (s *OCIStore) Close() error { + return s.Store.Close() +} + +// ReadBlob return an io.ReadCloser for the requested digest +func (s *OCIStore) ReadBlob(digest string) (io.ReadCloser, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return nil, fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.ReadBlob(digest) +} + +// WriteBlob takes a blob encapsulated in an io.Reader, calculates its digest +// and then stores it in the blob store +// mediaType can be used to define a blob mediaType for later fetching of all +// the blobs my mediaType +func (s *OCIStore) WriteBlob(r io.ReadSeeker, mediaType string) (string, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return "", fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.WriteBlob(r, mediaType) +} + +// ListBlobs returns a list of all blob digest of the given mediaType +func (s *OCIStore) ListBlobs(mediaType string) ([]string, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return nil, fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.ListBlobs(mediaType) +} + +// DeleteBlob removes a blob. +// It firstly removes the blob infos from the db, then it tries to remove the +// non transactional data. +// If some error occurs removing some non transactional data a +// StoreRemovalError is returned. 
+func (s *OCIStore) DeleteBlob(digest string) error { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.DeleteBlob(digest) +} + +// GetRef get a ref to a blob, returns the blob's digest +func (s *OCIStore) GetRef(name string) (*common.Descriptor, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return nil, fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + data, err := s.Store.GetData(name, MediaTypeDescriptor) + if err != nil { + return nil, fmt.Errorf("cannot get ref: %v", err) + } + var d *common.Descriptor + if err := json.Unmarshal(data.Value, &d); err != nil { + return nil, fmt.Errorf("failed to unmarshal descriptor: %v", err) + } + return d, nil +} + +// SetRef defines a ref to a blob +func (s *OCIStore) SetRef(name string, d *common.Descriptor) error { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + + data, err := json.Marshal(d) + if err != nil { + return fmt.Errorf("failed to marshal descriptor: %v", err) + } + return s.Store.SetData(name, data, MediaTypeDescriptor, "") +} + +// DeleteRef defines a ref to a blob +func (s *OCIStore) DeleteRef(name string) error { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.DeleteData(name, MediaTypeDescriptor) +} + +// ListAllRefs retrieves all the refs +func (s *OCIStore) ListAllRefs() ([]string, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return nil, fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + return s.Store.ListAllDataKeys(MediaTypeDescriptor) +} + +// CheckAllDataAvailable verifies that, given a ref name, all the required blobs are 
available in the store +func (s *OCIStore) CheckAllDataAvailable(name string) (bool, error) { + gcLock, err := lock.SharedLock(s.gcLockDir, lock.Dir) + if err != nil { + return false, fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + + return s.checkAllDataAvailable(name) +} + +func (s *OCIStore) checkAllDataAvailable(name string) (bool, error) { + data, err := s.Store.GetData(name, MediaTypeDescriptor) + if err != nil { + return false, fmt.Errorf("cannot get ref: %v", err) + } + var d *common.Descriptor + if err := json.Unmarshal(data.Value, &d); err != nil { + return false, fmt.Errorf("failed to unmarshal descriptor: %v", err) + } + + // Check manifest availability + minfo, err := s.Store.GetBlobInfo(pathDigest(d.Digest)) + if err != nil { + return false, err + } + if minfo == nil { + return false, fmt.Errorf("manifest is missing") + } + // TODO(sgotti) also handle MediaTypeManifestList + if minfo.MediaType != MediaTypeManifest { + return false, fmt.Errorf("wrong referenced media type") + } + mblob, err := s.Store.ReadBlob(d.Digest) + if err != nil { + return false, err + } + defer mblob.Close() + var m *common.Manifest + if err := json.NewDecoder(mblob).Decode(&m); err != nil { + return false, fmt.Errorf("failed to unmarshal manifest: %v", err) + } + + // Check config availability + cinfo, err := s.Store.GetBlobInfo(pathDigest(m.Config.Digest)) + if err != nil { + return false, err + } + if cinfo == nil { + return false, fmt.Errorf("config is missing") + } + + // Check layers availability + for _, l := range m.Layers { + linfo, err := s.Store.GetBlobInfo(pathDigest(l.Digest)) + if err != nil { + return false, err + } + if linfo == nil { + return false, fmt.Errorf("layer with digest %s is missing", l.Digest) + } + } + + return true, nil +} + +// GC removes unreferenced blobs in addition the the base store GC +func (s *OCIStore) GC() error { + // GC will remove blobs not referenced (named references to manifests + // and manifest references 
to layers and configs). If between the + // unreferenced blobs calculation and the real blob removal some of + // these blob becomes referenced, they will be removed + // To avoid this we have to take an exclusive gc lock + + // If GC runs when another store users is uploading some blobs, but they aren't already referenced, they will be removed. + // To avoid this, the upload should be done in this order: + // SetRef + // WriteBlob for Manifest + // WriteBlob for config and Layers + + gcLock, err := lock.ExclusiveLock(s.gcLockDir, lock.Dir) + if err != nil { + return fmt.Errorf("error getting gc lock: %v", err) + } + defer gcLock.Close() + + // Remove unreferenced manifests + // Get all manifests + mdigests, err := s.Store.ListBlobs(MediaTypeManifest) + if err != nil { + return err + } + + // Get all refs + refs, err := s.Store.ListAllDataKeys(MediaTypeDescriptor) + if err != nil { + return err + } + + refsmap := map[string]struct{}{} + for _, r := range refs { + data, err := s.Store.GetData(r, MediaTypeDescriptor) + if err != nil { + return fmt.Errorf("cannot get ref: %v", err) + } + var d *common.Descriptor + if err := json.Unmarshal(data.Value, &d); err != nil { + return fmt.Errorf("failed to unmarshal descriptor: %v", err) + } + refsmap[pathDigest(d.Digest)] = struct{}{} + } + // Calculate and remove unreferenced manifests + oldmdigests := []string{} + for _, mdigest := range mdigests { + if _, ok := refsmap[mdigest]; !ok { + oldmdigests = append(oldmdigests, mdigest) + } + } + + for _, oldmdigest := range oldmdigests { + // Ignore delete errors + s.Store.DeleteBlob(oldmdigest) + } + + // Remove unreferenced layers and configs + // Get all manifests + mdigests, err = s.Store.ListBlobs(MediaTypeManifest) + if err != nil { + return err + } + // Get all configs and layers + cdigests, err := s.Store.ListBlobs(MediaTypeImageSerializationConfig) + if err != nil { + return err + } + ldigests, err := s.Store.ListBlobs(MediaTypeImageSerialization) + if err != nil { + 
return err + } + digests := append(cdigests, ldigests...) + + refsmap = map[string]struct{}{} + for _, mdigest := range mdigests { + mr, err := s.Store.ReadBlob(mdigest) + if err != nil { + return err + } + defer mr.Close() + var m common.Manifest + if err := json.NewDecoder(mr).Decode(&m); err != nil { + return err + } + + refsmap[pathDigest(m.Config.Digest)] = struct{}{} + for _, l := range m.Layers { + refsmap[pathDigest(l.Digest)] = struct{}{} + } + } + // Calculate and remove unreferenced configs and layers + olddigests := []string{} + for _, digest := range digests { + if _, ok := refsmap[digest]; !ok { + olddigests = append(olddigests, digest) + } + } + + for _, olddigest := range olddigests { + // Ignore delete errors + s.Store.DeleteBlob(olddigest) + } + + // Call base store GC + if err := s.Store.GC(); err != nil { + return err + } + + return nil +} + +func pathDigest(digest string) string { + return strings.Replace(digest, ":", "-", 1) +} diff --git a/store/backup.go b/store/backup.go new file mode 100644 index 0000000..e8837c5 --- /dev/null +++ b/store/backup.go @@ -0,0 +1,96 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/coreos/rkt/pkg/fileutil" + "github.com/coreos/rkt/pkg/user" +) + +// createBackup backs a database up in a given directory. 
It basically +// copies this directory into a given backups directory. The backups +// directory has a simple structure - a directory inside named "0" is +// the most recent backup. A directory name for oldest backup is +// deduced from a given limit. For instance, for limit being 5 the +// name for the oldest backup would be "4". If a backups number +// exceeds the given limit then only newest ones are kept and the rest +// is removed. +func createBackup(dbDir, backupsDir string, limit int) error { + tmpBackupDir := filepath.Join(backupsDir, "tmp") + if err := os.MkdirAll(backupsDir, defaultPathPerm); err != nil { + return err + } + if err := fileutil.CopyTree(dbDir, tmpBackupDir, user.NewBlankUidRange()); err != nil { + return err + } + defer os.RemoveAll(tmpBackupDir) + // prune backups + if err := pruneOldBackups(backupsDir, limit-1); err != nil { + return err + } + if err := shiftBackups(backupsDir, limit-2); err != nil { + return err + } + if err := os.Rename(tmpBackupDir, filepath.Join(backupsDir, "0")); err != nil { + return err + } + return nil +} + +// pruneOldBackups removes old backups, that is - directories with +// names greater or equal than given limit. +func pruneOldBackups(dir string, limit int) error { + if list, err := ioutil.ReadDir(dir); err != nil { + return err + } else { + for _, fi := range list { + if num, err := strconv.Atoi(fi.Name()); err != nil { + // directory name is not a number, + // leave it alone + continue + } else if num < limit { + // directory name is a number lower + // than a limit, leave it alone + continue + } + path := filepath.Join(dir, fi.Name()) + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + return nil +} + +// shiftBackups renames all directories with names being numbers up to +// oldest to names with numbers greater by one. 
+func shiftBackups(dir string, oldest int) error { + if oldest < 0 { + return nil + } + for i := oldest; i >= 0; i-- { + current := filepath.Join(dir, strconv.Itoa(i)) + inc := filepath.Join(dir, strconv.Itoa(i+1)) + if err := os.Rename(current, inc); err != nil && !os.IsNotExist(err) { + return err + } + } + return nil +} diff --git a/store/backup_test.go b/store/backup_test.go new file mode 100644 index 0000000..3e36d89 --- /dev/null +++ b/store/backup_test.go @@ -0,0 +1,126 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package store + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" +) + +// TestBackup tests backup creation with limit 4 +func TestBackup(t *testing.T) { + dir, err := ioutil.TempDir("", tstprefix) + if err != nil { + t.Fatalf("error creating tempdir: %v", err) + } + defer os.RemoveAll(dir) + dbDir := filepath.Join(dir, "db") + backupsDir := filepath.Join(dir, "db-backups") + limit := 4 + + for i := 0; i < limit*2; i++ { + if err := os.RemoveAll(dbDir); err != nil { + t.Fatalf("error removing dbdir: %v", err) + } + if err := os.MkdirAll(dbDir, 0755); err != nil { + t.Fatalf("error creating dbdir: %v", err) + } + if err := touch(filepath.Join(dbDir, strconv.Itoa(i))); err != nil { + t.Fatalf("error creating a file: %v", err) + } + if err := createBackup(dbDir, backupsDir, limit); err != nil { + t.Fatalf("error creating a backup: %v", err) + } + if err := checkBackups(backupsDir, limit, i); err != nil { + t.Fatalf("error checking the backup: %v", err) + } + } +} + +func touch(fileName string) error { + file, err := os.Create(fileName) + if err != nil { + return err + } + if err := file.Close(); err != nil { + return err + } + return nil +} + +func checkBackups(backupsDir string, limit, i int) error { + expected := getExpectedTree(limit, i) + return checkDirectory(backupsDir, expected) +} + +// Expectations for limit L +// +// for iteration I (0, 1, 2, ...) +// at most D directories (where D = min(L,I + 1) +// dir 0 with file I +// dir 1 with file I - 1 +// ... 
+// dir D - 1 with file I - L +func getExpectedTree(limit, iteration int) map[string]string { + dirCount := iteration + if iteration > limit { + dirCount = limit + } + expected := make(map[string]string, dirCount) + for j := 0; j <= dirCount; j++ { + expected[strconv.Itoa(j)] = strconv.Itoa(iteration - j) + } + return expected +} + +func checkDirectory(dir string, expected map[string]string) error { + list, err := ioutil.ReadDir(dir) + if err != nil { + return err + } + for _, fi := range list { + if !fi.IsDir() { + return fmt.Errorf("%s is not a directory", fi.Name()) + } + dirName := fi.Name() + if _, err := strconv.Atoi(dirName); err != nil { + return fmt.Errorf("Name is not a number: %v", err) + } + expectedFileName, ok := expected[dirName] + if !ok { + return fmt.Errorf("Unexpected name found: %s", dirName) + } + subList, err := ioutil.ReadDir(filepath.Join(dir, dirName)) + if err != nil { + return fmt.Errorf("Failed to get a list of files in %s: %v", + dirName, err) + } + filesCount := len(subList) + if filesCount != 1 { + return fmt.Errorf("Expected only 1 file in %s, got %d", + dirName, filesCount) + } + gottenFileName := subList[0].Name() + if gottenFileName != expectedFileName { + return fmt.Errorf("Expected %s in %s, got %s", + expectedFileName, dirName, gottenFileName) + } + } + return nil +} diff --git a/store/blob.go b/store/blob.go new file mode 100644 index 0000000..9b7163a --- /dev/null +++ b/store/blob.go @@ -0,0 +1,121 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// Blob is used to index a Blob.
type Blob struct {
	Digest    string
	MediaType string
}

// newBlob returns a Blob index entry for the given digest and media type.
func newBlob(digest string, mediaType string) *Blob {
	return &Blob{
		Digest:    digest,
		MediaType: mediaType,
	}
}

// blobRowScan scans a blobinfo row into blob.
func blobRowScan(rows *sql.Rows, blob *Blob) error {
	// This ordering MUST match that in schema.go
	return rows.Scan(&blob.Digest, &blob.MediaType)
}

// getBlobsWithPrefix returns all the blobs with a digest starting with the given prefix.
func getBlobsWithPrefix(tx *sql.Tx, prefix string) ([]*Blob, error) {
	var blobs []*Blob
	rows, err := tx.Query("SELECT * from blobinfo WHERE hasPrefix(digest, $1)", prefix)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		blob := &Blob{}
		if err := blobRowScan(rows, blob); err != nil {
			return nil, err
		}
		blobs = append(blobs, blob)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return blobs, nil
}

// getBlob returns the blob with the given digest, or nil if it doesn't exist.
func getBlob(tx *sql.Tx, digest string) (*Blob, error) {
	var b *Blob
	rows, err := tx.Query("SELECT * from blobinfo WHERE digest == $1", digest)
	if err != nil {
		return nil, err
	}
	// Close explicitly: breaking out of Next below leaves the rows open,
	// which previously leaked the underlying statement.
	defer rows.Close()
	for rows.Next() {
		b = &Blob{}
		if err := blobRowScan(rows, b); err != nil {
			return nil, err
		}
		// No more than one row for digest must exist.
		break
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return b, nil
}

// getAllBlobsByMediaType returns all the blobs with the required mediaType.
func getAllBlobsByMediaType(tx *sql.Tx, mediaType string) ([]*Blob, error) {
	var blobs []*Blob
	rows, err := tx.Query("SELECT * from blobinfo where mediatype == $1", mediaType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		blob := &Blob{}
		if err := blobRowScan(rows, blob); err != nil {
			return nil, err
		}
		blobs = append(blobs, blob)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return blobs, nil
}

// writeBlob adds or updates the provided blob.
func writeBlob(tx *sql.Tx, blob *Blob) error {
	// ql doesn't have an INSERT OR UPDATE function so
	// it's faster to remove and reinsert the row
	if _, err := tx.Exec("DELETE from blobinfo where digest == $1", blob.Digest); err != nil {
		return err
	}
	if _, err := tx.Exec("INSERT into blobinfo (digest, mediatype) VALUES ($1, $2)", blob.Digest, blob.MediaType); err != nil {
		return err
	}
	return nil
}

// removeBlob removes the blob with the given digest.
func removeBlob(tx *sql.Tx, digest string) error {
	_, err := tx.Exec("DELETE from blobinfo where digest == $1", digest)
	return err
}
// BlobData is raw blob content.
type BlobData []byte

// Data is used to index data.
type Data struct {
	Key      string
	Value    []byte
	DataType string
	Digest   string
}

// newData returns a Data entry for the given key and value.
func newData(key string, value []byte) *Data {
	return &Data{
		Key:   key,
		Value: value,
	}
}

// dataRowScan scans a data table row into data.
func dataRowScan(rows *sql.Rows, data *Data) error {
	// This ordering MUST match that in schema.go
	return rows.Scan(&data.Key, &data.Value, &data.DataType, &data.Digest)
}

// getDatasWithPrefix returns all the datas with a key starting with the given prefix.
func getDatasWithPrefix(tx *sql.Tx, prefix, dataType string) ([]*Data, error) {
	var datas []*Data
	rows, err := tx.Query("SELECT * from data WHERE hasPrefix(key, $1) AND datatype == $2", prefix, dataType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		data := &Data{}
		if err := dataRowScan(rows, data); err != nil {
			return nil, err
		}
		datas = append(datas, data)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datas, nil
}

// getData returns the data with the given key, or nil if it doesn't exist.
func getData(tx *sql.Tx, key, dataType string) (*Data, error) {
	var b *Data
	rows, err := tx.Query("SELECT * from data WHERE key == $1 AND datatype == $2", key, dataType)
	if err != nil {
		return nil, err
	}
	// Close explicitly: breaking out of Next below leaves the rows open,
	// which previously leaked the underlying statement.
	defer rows.Close()
	for rows.Next() {
		b = &Data{}
		if err := dataRowScan(rows, b); err != nil {
			return nil, err
		}
		// No more than one row for key must exist.
		break
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return b, nil
}

// listAllDataKeys returns all the keys for the required dataType.
func listAllDataKeys(tx *sql.Tx, dataType string) ([]string, error) {
	var keys []string
	rows, err := tx.Query("SELECT key from data where datatype == $1", dataType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			return nil, err
		}
		keys = append(keys, key)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return keys, nil
}

// getAllDatas returns all the datas for the required dataType.
func getAllDatas(tx *sql.Tx, dataType string) ([]*Data, error) {
	var datas []*Data
	rows, err := tx.Query("SELECT * from data where datatype == $1", dataType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		data := &Data{}
		if err := dataRowScan(rows, data); err != nil {
			return nil, err
		}
		datas = append(datas, data)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return datas, nil
}

// writeData adds or updates the provided data.
func writeData(tx *sql.Tx, data *Data) error {
	// ql doesn't have an INSERT OR UPDATE function so
	// it's faster to remove and reinsert the row
	if _, err := tx.Exec("DELETE from data where key == $1", data.Key); err != nil {
		return err
	}
	if _, err := tx.Exec("INSERT into data (key, value, datatype, digest) VALUES ($1, $2, $3, $4)", data.Key, data.Value, data.DataType, data.Digest); err != nil {
		return err
	}
	return nil
}
// removeData removes the data with the given key and dataType.
func removeData(tx *sql.Tx, key, dataType string) error {
	_, err := tx.Exec("DELETE from data where key == $1 AND datatype == $2", key, dataType)
	if err != nil {
		return err
	}
	return nil
}

const (
	// DbFilename is the name of the ql database file inside the db directory.
	DbFilename = "ql.db"
)

// DB provides access to the store's ql database. The database is opened
// lazily: Do takes an exclusive file lock on dbdir, opens the db, runs the
// provided functions inside a transaction and closes the db again.
type DB struct {
	dbdir string
	sqldb *sql.DB
}

// NewDB creates dbdir if necessary and returns a DB handle for it.
// The database file itself is not opened here; see Do.
func NewDB(dbdir string) (*DB, error) {
	if err := os.MkdirAll(dbdir, defaultPathPerm); err != nil {
		return nil, err
	}

	return &DB{dbdir: dbdir}, nil
}

// open opens the ql database file. Callers must hold the dbdir flock
// (see Do).
func (db *DB) open() error {
	sqldb, err := sql.Open("ql", filepath.Join(db.dbdir, DbFilename))
	if err != nil {
		return err
	}
	db.sqldb = sqldb

	return nil
}

// close closes the previously opened database. It panics if open wasn't
// called first, since that is a programming error, not a runtime condition.
func (db *DB) close() error {
	if db.sqldb == nil {
		panic("cas db, Close called without an open sqldb")
	}

	if err := db.sqldb.Close(); err != nil {
		return errwrap.Wrap(errors.New("cas db close failed"), err)
	}
	db.sqldb = nil

	// Don't close the flock as it will be reused.
	return nil
}

// begin starts a transaction on the open database.
func (db *DB) begin() (*sql.Tx, error) {
	return db.sqldb.Begin()
}

// txfunc is a function executed inside a database transaction.
type txfunc func(*sql.Tx) error

// Do takes an exclusive flock, opens the db, executes the given txfuncs in a
// transaction (see doTx) and then closes the db.
// It's the unique function exported since it's the one that takes the
// exclusive flock.
func (db *DB) Do(fns ...txfunc) error {
	l, err := lock.ExclusiveLock(db.dbdir, lock.Dir)
	if err != nil {
		return err
	}
	defer l.Close()
	if err := db.open(); err != nil {
		return err
	}
	defer db.close()

	return db.doTx(fns...)
}

// doTx executes the provided txfuncs inside a unique transaction.
// If one of the functions returns an error the whole transaction is rolled back.
func (db *DB) doTx(fns ...txfunc) error {
	tx, err := db.begin()
	if err != nil {
		return err
	}
	for _, fn := range fns {
		if err := fn(tx); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}
+ +package store + +import ( + "database/sql" + "fmt" + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/coreos/rkt/tests/testutils" +) + +func queryValue(query string, tx *sql.Tx) (int, error) { + var value int + rows, err := tx.Query(query) + if err != nil { + return -1, err + } + defer rows.Close() + + if !rows.Next() { + return -1, fmt.Errorf("no result of %q", query) + } + if err := rows.Scan(&value); err != nil { + return -1, err + } + return value, nil +} + +func insertValue(db *DB) error { + return db.Do(func(tx *sql.Tx) error { + // Get the current count. + count, err := queryValue("SELECT count(*) FROM rkt_db_test", tx) + if err != nil { + return err + } + // Increase the count. + _, err = tx.Exec(fmt.Sprintf("INSERT INTO rkt_db_test VALUES (%d)", count+1)) + return err + }) +} + +func getMaxCount(db *DB, t *testing.T) int { + var maxCount int + var err error + if err := db.Do(func(tx *sql.Tx) error { + // Get the maximum count. + maxCount, err = queryValue("SELECT max(counts) FROM rkt_db_test", tx) + return err + }); err != nil { + t.Fatalf("Failed to get the maximum count: %v", err) + } + return maxCount +} + +func createTable(db *DB, t *testing.T) { + if err := db.Do(func(tx *sql.Tx) error { + _, err := tx.Exec("CREATE TABLE rkt_db_test (counts int)") + return err + }); err != nil { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestDBRace(t *testing.T) { + // TODO(sgotti) this will not find concurrent accesses to ql db from + // multiple processes using multiple goroutines. A test that spawns at + // least two processes using multiple goroutines is needed. 
+ // See https://github.com/coreos/rkt/pull/2391 + oldGoMaxProcs := runtime.GOMAXPROCS(runtime.NumCPU()) + defer runtime.GOMAXPROCS(oldGoMaxProcs) + + dir, err := ioutil.TempDir("", "rkt_db_test") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer os.RemoveAll(dir) + + db, err := NewDB(dir) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + // Create the table. + createTable(db, t) + + // Insert values concurrently. + ga := testutils.NewGoroutineAssistant(t) + runs := 100 + ga.Add(runs) + for i := 0; i < runs; i++ { + go func() { + if err := insertValue(db); err != nil { + ga.Fatalf("Failed to insert value: %v", err) + } + ga.Done() + }() + } + ga.Wait() + + // Check the final values. + maxCount := getMaxCount(db, t) + if maxCount != runs { + t.Errorf("Expected: %v, saw: %v", runs, maxCount) + } +} diff --git a/store/migrate.go b/store/migrate.go new file mode 100644 index 0000000..1565a38 --- /dev/null +++ b/store/migrate.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "database/sql" + "fmt" + + "github.com/hashicorp/errwrap" +) + +type migrateFunc func(*sql.Tx) error + +var ( + // migrateTable is a map of migrate functions. The key is the db + // version to migrate to. 
+ migrateTable = map[int]migrateFunc{} +) + +func migrate(tx *sql.Tx, finalVersion int) error { + if finalVersion > dbVersion { + return fmt.Errorf("required migrate final version greater than the last supported db version") + } + version, err := getDBVersion(tx) + if err != nil { + return err + } + + for v := version + 1; v <= finalVersion; v++ { + f, ok := migrateTable[v] + if !ok { + return fmt.Errorf("missing migrate function for version %d", v) + } + err := f(tx) + if err == nil { + updateDBVersion(tx, v) + } + if err != nil { + return errwrap.Wrap(fmt.Errorf("failed to migrate db to version %d", v), err) + } + } + return nil +} diff --git a/store/schema.go b/store/schema.go new file mode 100644 index 0000000..3d8748b --- /dev/null +++ b/store/schema.go @@ -0,0 +1,100 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "database/sql" + "fmt" +) + +const ( + // Incremental db version at the current code revision. + dbVersion = 1 +) + +// Statement to run when creating a db. These are the statements to create the +// db at the latest db version (dbVersion) provided by this rkt version. +// If the db already exists migration statements should be executed +var dbCreateStmts = [...]string{ + // version table + "CREATE TABLE IF NOT EXISTS version (version int);", + fmt.Sprintf("INSERT INTO version VALUES (%d)", dbVersion), + + // blobinfo table. The primary key is "digest". 
+ "CREATE TABLE IF NOT EXISTS blobinfo (digest string, mediatype string);", + "CREATE UNIQUE INDEX IF NOT EXISTS digestidx ON blobinfo (digest);", + + // data table. The primary key is "key". + "CREATE TABLE IF NOT EXISTS data (key string, value blob, datatype string, digest string);", + "CREATE UNIQUE INDEX IF NOT EXISTS keyidx ON data (key);", +} + +// dbIsPopulated checks if the db is already populated (at any version) verifing if the "version" table exists +func dbIsPopulated(tx *sql.Tx) (bool, error) { + rows, err := tx.Query("SELECT Name FROM __Table where Name == $1", "version") + if err != nil { + return false, err + } + count := 0 + for rows.Next() { + count++ + } + if err := rows.Err(); err != nil { + return false, err + } + if count > 0 { + return true, nil + } + return false, nil +} + +// getDBVersion retrieves the current db version +func getDBVersion(tx *sql.Tx) (int, error) { + var version int + rows, err := tx.Query("SELECT version FROM version") + if err != nil { + return -1, err + } + found := false + for rows.Next() { + if err := rows.Scan(&version); err != nil { + return -1, err + } + found = true + break + } + if err := rows.Err(); err != nil { + return -1, err + } + if !found { + return -1, fmt.Errorf("db version table empty") + } + return version, nil +} + +// updateDBVersion updates the db version +func updateDBVersion(tx *sql.Tx, version int) error { + // ql doesn't have an INSERT OR UPDATE function so + // it's faster to remove and reinsert the row + _, err := tx.Exec("DELETE FROM version") + if err != nil { + return err + } + _, err = tx.Exec("INSERT INTO version VALUES ($1)", version) + if err != nil { + return err + } + return nil +} diff --git a/store/store.go b/store/store.go new file mode 100644 index 0000000..c49b9ae --- /dev/null +++ b/store/store.go @@ -0,0 +1,511 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "crypto/sha256" + "database/sql" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/coreos/rkt/pkg/lock" + + "github.com/peterbourgon/diskv" +) + +const ( + defaultPathPerm = os.FileMode(0770 | os.ModeSetgid) + defaultFilePerm = os.FileMode(0660) + + // TODO(sgotti) Handle multiple digest algorithms (sha384, sha512) + digestPrefix = "sha256-" + lenHash = sha256.Size // raw byte size + lenHexHash = lenHash // in hex characters + lenDigest = len(digestPrefix) + lenHexHash + minLenDigest = len(digestPrefix) + 2 // at least sha256-aa + + // how many backups to keep when migrating to new db version + backupsNumber = 5 +) + +var ( + ErrDigestNotFound = errors.New("no digest found") + ErrMultipleDigests = errors.New("found multiple digests") + // ErrRemovingBlobContents defines an error removing non transactional (diskv) blob contents + // When this happen there's the possibility that the store is left in an + // unclean state (for example with some stale files). This will be fixed by store GC + ErrRemovingBlobContents = errors.New("failed to remove blob disk contents") +) + +// Store encapsulates a content-addressable-storage for storing blobs on disk. +// It's an indexed CAS with the ability to augment a blob with additional data +// and a key value store (with optional references to a blob) +type Store struct { + dir string + blobStore *diskv.Diskv + db *DB + // storeLock is a lock on the whole store. It's used for store migration. 
If + // a previous version of rkt is using the store and in the meantime a + // new version is installed and executed it will try migrate the store + // during NewStore. This means that the previous running rkt will fail + // or behave badly after the migration as it's expecting another db format. + // For this reason, before executing migration, an exclusive lock must + // be taken on the whole store. + storeLock *lock.FileLock + blobLockDir string +} + +func NewStore(storeDir string) (*Store, error) { + // We need to allow the store's setgid bits (if any) to propagate, so + // disable umask + um := syscall.Umask(0) + defer syscall.Umask(um) + + blobStore := diskv.New(diskv.Options{ + PathPerm: defaultPathPerm, + FilePerm: defaultFilePerm, + BasePath: filepath.Join(storeDir, "blobs"), + Transform: blockTransform, + }) + + s := &Store{ + dir: storeDir, + blobStore: blobStore, + } + + s.blobLockDir = filepath.Join(storeDir, "bloblocks") + if err := os.MkdirAll(s.blobLockDir, defaultPathPerm); err != nil { + return nil, err + } + + // Take a shared store lock + var err error + s.storeLock, err = lock.NewLock(storeDir, lock.Dir) + if err != nil { + return nil, err + } + if err := s.storeLock.SharedLock(); err != nil { + return nil, err + } + + db, err := NewDB(filepath.Join(storeDir, "db")) + if err != nil { + return nil, err + } + s.db = db + + needsMigrate := false + fn := func(tx *sql.Tx) error { + var err error + ok, err := dbIsPopulated(tx) + if err != nil { + return err + } + // populate the db + if !ok { + for _, stmt := range dbCreateStmts { + _, err = tx.Exec(stmt) + if err != nil { + return fmt.Errorf("stmt: %s err: %v", stmt, err) + } + } + return nil + } + // if db is populated check its version + version, err := getDBVersion(tx) + if err != nil { + return err + } + if version < dbVersion { + needsMigrate = true + } + if version > dbVersion { + return fmt.Errorf("current store db version: %d (greater than the current rkt expected version: %d)", version, 
dbVersion) + } + return nil + } + if err = db.Do(fn); err != nil { + return nil, err + } + + // migration is done in another transaction as it must take an exclusive + // store lock. If, in the meantime, another process has already done the + // migration, between the previous db version check and the below + // migration code, the migration will do nothing as it'll start + // migration from the current version. + if needsMigrate { + // Take an exclusive store lock + err := s.storeLock.ExclusiveLock() + if err != nil { + return nil, err + } + if err := s.backupDB(); err != nil { + return nil, err + } + fn := func(tx *sql.Tx) error { + return migrate(tx, dbVersion) + } + if err = db.Do(fn); err != nil { + return nil, err + } + } + + return s, nil +} + +// Close closes a Store opened with NewStore(). +func (s *Store) Close() error { + return s.storeLock.Close() +} + +// backupDB backs up current database. +func (s *Store) backupDB() error { + backupsDir := filepath.Join(s.dir, "db-backups") + return createBackup(s.db.dbdir, backupsDir, backupsNumber) +} + +// TmpFile returns an *os.File local to the same filesystem as the Store, or +// any error encountered +func (s *Store) TmpFile() (*os.File, error) { + dir, err := s.TmpDir() + if err != nil { + return nil, err + } + return ioutil.TempFile(dir, "") +} + +// TmpDir creates and returns dir local to the same filesystem as the Store, +// or any error encountered +func (s *Store) TmpDir() (string, error) { + dir := filepath.Join(s.dir, "tmp") + if err := os.MkdirAll(dir, defaultPathPerm); err != nil { + return "", err + } + return dir, nil +} + +// ResolveDigest resolves a partial digest (of format `sha256-0c45e8c0ab2`) to a full +// digest by considering the digest prefix and using the store for resolution. 
+func (s *Store) ResolveDigest(inDigest string) (string, error) { + digest, err := s.parseDigest(inDigest) + if err != nil { + return "", fmt.Errorf("cannot parse digest %s: %v", inDigest, err) + } + + var blobs []*Blob + err = s.db.Do(func(tx *sql.Tx) error { + var err error + blobs, err = getBlobsWithPrefix(tx, digest) + return err + }) + if err != nil { + return "", fmt.Errorf("error retrieving blob infos: %v", err) + } + + blobCount := len(blobs) + if blobCount == 0 { + return "", ErrDigestNotFound + } + if blobCount != 1 { + return "", ErrMultipleDigests + } + return blobs[0].Digest, nil +} + +// ReadBlob return an io.ReadCloser for the requested digest +func (s *Store) ReadBlob(digest string) (io.ReadCloser, error) { + digest, err := s.ResolveDigest(digest) + if err != nil { + return nil, fmt.Errorf("error resolving digest: %v", err) + } + blobLock, err := lock.SharedKeyLock(s.blobLockDir, digest) + if err != nil { + return nil, fmt.Errorf("error locking blob: %v", err) + } + defer blobLock.Close() + + var blob *Blob + err = s.db.Do(func(tx *sql.Tx) error { + var err error + blob, err = getBlob(tx, digest) + if err != nil { + return fmt.Errorf("error getting blob: %v", err) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("cannot get blob info for %q from db: %v", digest, err) + } + if blob == nil { + return nil, fmt.Errorf("blob %q does not exists") + } + + return s.blobStore.ReadStream(digest, false) +} + +// WriteBlob takes a blob encapsulated in an io.Reader, calculates its digest +// and then stores it in the blob store +// mediaType can be used to define a blob mediaType for later fetching of all +// the blobs my mediaType +func (s *Store) WriteBlob(r io.ReadSeeker, mediaType string) (string, error) { + // We need to allow the store's setgid bits (if any) to propagate, so + // disable umask + um := syscall.Umask(0) + defer syscall.Umask(um) + + h := sha256.New() + tr := io.TeeReader(r, h) + fh, err := s.TmpFile() + if err != nil { + 
return "", fmt.Errorf("error creating temporary file: %v", err) + } + defer os.Remove(fh.Name()) + _, err = io.Copy(fh, tr) + if err != nil { + return "", fmt.Errorf("error copying blob: %v", err) + } + + digest := s.hashToDigest(h) + + blobLock, err := lock.ExclusiveKeyLock(s.blobLockDir, digest) + if err != nil { + return "", fmt.Errorf("error locking blob: %v", err) + } + defer blobLock.Close() + + // TODO(sgotti) Avoid importing if the blob is already in the store? + if err := s.blobStore.Import(fh.Name(), digest, true); err != nil { + return "", fmt.Errorf("error importing blob: %v", err) + } + + // Save blob info + if err := s.db.Do(func(tx *sql.Tx) error { + blob := &Blob{ + Digest: digest, + MediaType: mediaType, + } + return writeBlob(tx, blob) + }); err != nil { + return "", fmt.Errorf("error writing blob info: %v", err) + } + + return digest, nil +} + +// GetBlobInfo return blob info for the provided blob digest +// If the blob doesn't exists the return value will be nil +func (s *Store) GetBlobInfo(digest string) (*Blob, error) { + var blob *Blob + err := s.db.Do(func(tx *sql.Tx) error { + var err error + blob, err = getBlob(tx, digest) + if err != nil { + return fmt.Errorf("error getting blob: %v", err) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("cannot get blob info from db: %v", err) + } + return blob, nil +} + +// ListBlobs returns a list of all blob digest of the given mediaType +func (s *Store) ListBlobs(mediaType string) ([]string, error) { + var blobs []*Blob + err := s.db.Do(func(tx *sql.Tx) error { + var err error + blobs, err = getAllBlobsByMediaType(tx, mediaType) + if err != nil { + return fmt.Errorf("error getting blob: %v", err) + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("cannot get blob list from db: %v", err) + } + + digests := make([]string, len(blobs)) + for i, b := range blobs { + digests[i] = b.Digest + } + return digests, nil +} + +// DeleteBlob removes a blob. 
+// It firstly removes the blob infos from the db, then it tries to remove the +// non transactional data. +// If some error occurs removing some non transactional data a +// ErrRemovingBlobContents is returned. The user may ignore this error since a +// followin GC will try to remove it and the blob won't be available with the +// other functions. +func (s *Store) DeleteBlob(digest string) error { + digest, err := s.ResolveDigest(digest) + if err != nil { + return fmt.Errorf("error resolving digest: %v", err) + } + blobLock, err := lock.ExclusiveKeyLock(s.blobLockDir, digest) + if err != nil { + return fmt.Errorf("error locking blob: %v", err) + } + defer blobLock.Close() + + err = s.db.Do(func(tx *sql.Tx) error { + if err := removeBlob(tx, digest); err != nil { + return err + } + return nil + }) + if err != nil { + return fmt.Errorf("cannot remove blob with digest %s from db: %v", digest, err) + } + + // Then remove non transactional data (diskv store) + if err := s.blobStore.Erase(digest); err != nil { + return ErrRemovingBlobContents + } + return nil +} + +// SetBlobData set additional data of dataType to a blob +// It'll be removed when the blob is removed +func (s *Store) SetBlobData(digest string, data []byte, dataType string) error { + //TODO(sgotti) implement + return nil +} + +// GetBlobData gets additional data of dataType from a blob +func (s *Store) GetBlobData(digest, dataType string) (*BlobData, error) { + //TODO(sgotti) implement + return nil, nil +} + +// DelBlobData removes additional data of dataType from a blob +func (s *Store) DeleteBlobData(digest, dataType string) error { + //TODO(sgotti) implement + return nil +} + +// SetData adds additional data of dataType. The data has an unique key to +// easily list and search by key +// If digest is not nil it can reference a blob digest and it'll be removed +// when the blob is removed. +// This can be useful to add custom data to the store. 
For example: +// * oci references where key is the ref name and data is an oci descriptor +// * remote caching information for an appc ACI store where key is the download +// URL, data contains additional data (like http caching information) and if +// references a digest to an ACI blob. +// TODO(sgotti) add a version field and a ErrConcurrentUpdate as a facility for +// clients to do optimistic locking? +// it should be useful to handle data migration without chaning the dataType +func (s *Store) SetData(key string, value []byte, dataType, digest string) error { + if err := s.db.Do(func(tx *sql.Tx) error { + if digest != "" { + // Check blob existance + blob, err := getBlob(tx, digest) + if err != nil { + return fmt.Errorf("error getting blob: %v", err) + } + if blob == nil { + return fmt.Errorf("cannot find blob with digest: %s", digest) + } + } + data := &Data{ + Key: key, + Value: value, + DataType: dataType, + Digest: digest, + } + return writeData(tx, data) + }); err != nil { + return fmt.Errorf("error writing data: %v", err) + } + return nil +} + +// GetData gets additional data from a blob +func (s *Store) GetData(key, dataType string) (*Data, error) { + var data *Data + err := s.db.Do(func(tx *sql.Tx) error { + var err error + data, err = getData(tx, key, dataType) + if err != nil { + return fmt.Errorf("error getting data: %v", err) + } + if data == nil { + return fmt.Errorf("data does not exists") + } + return nil + }) + if err != nil { + return nil, fmt.Errorf("cannot get data for key %s and dataType %s: %v", key, dataType, err) + } + return data, nil +} + +// DeleteData removes additional data with the specified key and dataType +func (s *Store) DeleteData(key, dataType string) error { + if err := s.db.Do(func(tx *sql.Tx) error { + return removeData(tx, key, dataType) + }); err != nil { + return fmt.Errorf("error removing data for key %s and dataType %s: %v", key, dataType, err) + } + return nil +} + +// ListData returns a list of all data for the given 
dataType +func (s *Store) ListAllDataKeys(dataType string) ([]string, error) { + var keys []string + if err := s.db.Do(func(tx *sql.Tx) error { + var err error + keys, err = listAllDataKeys(tx, dataType) + return err + }); err != nil { + return nil, fmt.Errorf("error getting all keys for dataType %s: %v", dataType, err) + } + return keys, nil +} + +// GC garbage collects stale blobs contents (blobs not appering in the db) +// (this can happend if they aren't remove for whatever reason) and remove +// unused keylocks +func (s *Store) GC() error { + //TODO(sgotti) implement stale blob removal + return lock.CleanKeyLocks(s.blobLockDir) +} + +func (s *Store) hashToDigest(h hash.Hash) string { + sum := h.Sum(nil) + return fmt.Sprintf("%s%x", digestPrefix, sum) +} + +func (s *Store) parseDigest(digest string) (string, error) { + // Replace possible colon with hypen + digest = strings.Replace(digest, ":", "-", 1) + if !strings.HasPrefix(digest, digestPrefix) { + return "", fmt.Errorf("wrong digest prefix") + } + if len(digest) < minLenDigest { + return "", fmt.Errorf("digest too short") + } + return digest, nil +} diff --git a/store/store_test.go b/store/store_test.go new file mode 100644 index 0000000..2c74ff0 --- /dev/null +++ b/store/store_test.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package store + +const tstprefix = "store-test" diff --git a/store/utils.go b/store/utils.go new file mode 100644 index 0000000..bd86a6c --- /dev/null +++ b/store/utils.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package store + +import ( + "fmt" + "strings" +) + +// blockTransform creates a path slice from the given string to use as a +// directory prefix. The string must be in hash format: +// "sha256-abcdefgh"... -> []{"sha256", "ab"} +// Right now it just copies the default of git which is a two byte prefix. We +// will likely want to add re-sharding later. +func blockTransform(s string) []string { + // TODO(philips): use spec/types.Hash after export typ field + parts := strings.SplitN(s, "-", 2) + if len(parts) != 2 { + panic(fmt.Errorf("blockTransform should never receive non-hash, got %v", s)) + } + return []string{ + parts[0], + parts[1][0:2], + } +}