A vendor-neutral storage library for Golang.
Write once, run on every storage service.
- Vendor agnostic
- Production ready
- High performance
package main
import (
	"errors"
	"log"

	"github.com/beyondstorage/go-storage/v5/services"
	"github.com/beyondstorage/go-storage/v5/types"

	// Add fs support
	_ "github.com/beyondstorage/go-storage/services/fs/v4"
	// Add s3 support
	_ "github.com/beyondstorage/go-storage/services/s3/v3"
	// Add gcs support
	_ "github.com/beyondstorage/go-storage/services/gcs/v3"
	// Add azblob support
	_ "github.com/beyondstorage/go-storage/services/azblob/v3"
	// More supported services can be found under BeyondStorage.
	_ "github.com/beyondstorage/go-storage/services/xxx"
)
func main() {
	// Init a Storager from a connection string.
	store, err := services.NewStoragerFromString("s3://bucket_name/path/to/workdir")
	if err != nil {
		log.Fatalf("service init failed: %v", err)
	}

	// Write data from an io.Reader into hello.txt
	// (r is any io.Reader and length is its size in bytes).
	n, err := store.Write("hello.txt", r, length)

	// Read data from hello.txt into an io.Writer w.
	n, err = store.Read("hello.txt", w)

	// Stat hello.txt to check existence or get its metadata.
	o, err := store.Stat("hello.txt")

	// Use the object's functions to get metadata.
	length, ok := o.GetContentLength()

	// List will create an iterator of objects under path.
	it, err := store.List("path")

	for {
		// Use iterator.Next to retrieve the next object until we meet IterateDone.
		o, err := it.Next()
		if errors.Is(err, types.IterateDone) {
			break
		}
	}

	// Delete hello.txt
	err = store.Delete("hello.txt")
}
More examples could be found at go-storage-example.
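Because every service is exposed through the same types.Storager interface, the program above can target another backend by changing only the connection string. A minimal sketch, assuming the local fs service accepts a connection string of the form fs:///path/to/workdir:

// Point the same program at the local file system instead of S3; every
// store.Write / store.Read / store.Stat call stays unchanged.
// (The fs connection string format here is an assumption.)
store, err := services.NewStoragerFromString("fs:///tmp/workdir")
if err != nil {
	log.Fatalf("service init failed: %v", err)
}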
16 stable services that have passed all integration tests.
- azblob: Azure Blob storage
- bos: Baidu Object Storage
- cos: Tencent Cloud Object Storage
- dropbox: Dropbox
- fs: Local file system
- ftp: FTP
- gcs: Google Cloud Storage
- gdrive: Google Drive
- ipfs: InterPlanetary File System
- kodo: qiniu kodo
- memory: data stored only in memory
- minio: MinIO
- obs: Huawei Object Storage Service
- oss: Aliyun Object Storage
- qingstor: QingStor Object Storage
- s3: Amazon S3
3 beta services that have implemented all required functions but have not yet passed integration tests.
- hdfs: Hadoop Distributed File System
- tar: tar files
- uss: UPYUN Storage Service
4 alpha services that are still under development.
More service ideas could be found at Service Integration Tracking.
Basic operations
- Metadata: get Storager metadata
meta := store.Metadata()
_ = meta.GetWorkDir() // Get the storager's work dir
_, ok := meta.GetWriteSizeMaximum() // Get the maximum size for write operation
- Read: read Object content
// Read 2048 byte at the offset 1024 into the io.Writer.
n, err := store.Read("path", w, pairs.WithOffset(1024), pairs.WithSize(2048))
- Write: write content into Object
// Write 2048 byte from io.Reader
n, err := store.Write("path", r, 2048)
- Stat: get Object metadata or check existence
o, err := store.Stat("path")
if errors.Is(err, services.ErrObjectNotExist) {
	// object does not exist
}
length, ok := o.GetContentLength() // get the object content length.
- Delete: delete an Object
err := store.Delete("path") // Delete the object "path"
- List: list Object in a given prefix or dir
it, err := store.List("path")
for {
	o, err := it.Next()
	if errors.Is(err, types.IterateDone) {
		// the list is over
		break
	}

	length, ok := o.GetContentLength() // get the object content length.
}
Extended operations
- Copy: copy an Object inside the storager
err := store.(Copier).Copy(src, dst) // Copy an object from src to dst.
- Move: move an Object inside the storager
err := store.(Mover).Move(src, dst) // Move an object from src to dst.
- Reach: generate a publicly accessible URL to an Object
url, err := store.(Reacher).Reach("path") // Generate a URL to the object.
- Dir: Dir Object support
o, err := store.(Direr).CreateDir("path") // Create a dir object.
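The snippets above use direct type assertions such as store.(Copier), which panic when a service does not implement the capability. A safer pattern is the two-value assertion; a minimal sketch, assuming the capability interfaces are exported from the types package (types.Copier and friends):

// Probe for the Copy capability instead of asserting directly.
copier, ok := store.(types.Copier)
if !ok {
	log.Fatal("this service does not support Copy")
}
err := copier.Copy(src, dst) // src and dst are object paths, as above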
Large file manipulation
- Multipart: allow doing multipart uploads (see the end-to-end sketch after this list)
ms := store.(Multiparter)
// Create a multipart object.
o, err := ms.CreateMultipart("path")
// Write 1024 bytes from io.Reader into a multipart at index 1
n, part, err := ms.WriteMultipart(o, r, 1024, 1)
// Complete a multipart object.
err := ms.CompleteMultipart(o, []*Part{part})
- Append: allow appending to an object
as := store.(Appender)
// Create an appendable object.
o, err := as.CreateAppend("path")
// Write 1024 bytes from io.Reader.
n, err := as.WriteAppend(o, r, 1024)
// Commit an append object.
err = as.CommitAppend(o)
- Block: allow combining an object with block ids
bs := store.(Blocker)
// Create a block object.
o, err := bs.CreateBlock("path")
// Write 1024 bytes from io.Reader with block id "id-abc"
n, err := bs.WriteBlock(o, r, 1024, "id-abc")
// Combine block via block ids.
err := bs.CombineBlock(o, []string{"id-abc"})
- Page: allow doing random writes
ps := store.(Pager)
// Create a page object.
o, err := ps.CreatePage("path")
// Write 1024 bytes from io.Reader at offset 2048
n, err := ps.WritePage(o, r, 1024, 2048)
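Tying the Multiparter calls together, here is an end-to-end upload sketch. It is a hedged example, not library code: the types.Multiparter and types.Part names, the 64 MiB part size, and the zero-based part index are assumptions, and imports (errors, io, os) are omitted to match the other snippets.

// uploadMultipart uploads size bytes from f to path in fixed-size parts.
func uploadMultipart(store types.Storager, path string, f *os.File, size int64) error {
	ms, ok := store.(types.Multiparter)
	if !ok {
		return errors.New("service does not support multipart uploads")
	}

	// Create the multipart object that the parts will be attached to.
	o, err := ms.CreateMultipart(path)
	if err != nil {
		return err
	}

	const partSize int64 = 64 << 20 // 64 MiB per part (an assumption, not a library default)
	var parts []*types.Part
	for index, offset := 0, int64(0); offset < size; index++ {
		n := partSize
		if size-offset < partSize {
			n = size - offset
		}
		// Write the next n bytes of the file as the part at this index.
		_, part, err := ms.WriteMultipart(o, io.LimitReader(f, n), n, index)
		if err != nil {
			return err
		}
		parts = append(parts, part)
		offset += n
	}

	// Combine all written parts into the final object.
	return ms.CompleteMultipart(o, parts)
}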
Global object metadata
- id: unique key in service
- name: relative path towards service's work dir
- mode: object mode can be a combination of read, dir, part and more
- etag: entity tag as defined in RFC 2616
- content-length: object's content size
- content-md5: md5 digest as defined in RFC 2616
- content-type: media type as defined in RFC 2616
- last-modified: object's last updated time
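These fields are exposed on the object returned by Stat or List. A minimal sketch of reading them; GetContentLength and GetContentMd5 appear elsewhere in this README, while the ID and Path fields, the Mode helper, and GetEtag are assumptions following the same generated-getter pattern:

o, err := store.Stat("path")
if err != nil {
	log.Fatalf("stat failed: %v", err)
}

_ = o.ID   // unique key in the service (assumed exported field)
_ = o.Path // relative path towards the service's work dir (assumed exported field)

if o.Mode.IsDir() { // assumed ObjectMode helper
	// this object is a dir object
}

// Optional metadata comes with an ok flag because not every service sets it.
if length, ok := o.GetContentLength(); ok {
	_ = length // object's content size
}
if etag, ok := o.GetEtag(); ok { // assumed getter, same pattern as GetContentMd5
	_ = etag // entity tag as defined in RFC 2616
}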
System object metadata
Service-specific system object metadata, such as storage-class.
o, err := store.Stat("path")
// Get service system metadata via the API provided by go-service-s3.
om := s3.GetObjectSystemMetadata(o)
_ = om.StorageClass // this object's storage class
_ = om.ServerSideEncryptionCustomerAlgorithm // this object's sse algorithm
Self-maintained codegen definitions help to generate all our APIs, pairs, and metadata.
Generated pairs can be used as optional API arguments.
func WithContentMd5(v string) Pair {
	return Pair{
		Key:   "content_md5",
		Value: v,
	}
}
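Such generated pairs are passed as trailing optional arguments to the operations. A minimal sketch, assuming the pair is exposed from the pairs package (the same package as WithOffset/WithSize above); md5Base64, r, and size are illustrative placeholders:

// Attach the expected content md5 to a write as an optional pair.
md5Base64 := "..." // base64-encoded MD5 digest of the content (placeholder)
n, err := store.Write("hello.txt", r, size, pairs.WithContentMd5(md5Base64))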
Generated object metadata can be used to get the content md5 from an object.
func (o *Object) GetContentMd5() (string, bool) {
	o.stat()

	if o.bit&objectIndexContentMd5 != 0 {
		return o.contentMd5, true
	}
	return "", false
}
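Since not every service reports this field, callers typically check the ok flag:

if md5sum, ok := o.GetContentMd5(); ok {
	_ = md5sum // the service reported a content md5 for this object
}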
Server-Side Encryption is supported via system pairs and system metadata, and Default Pairs can be used to simplify the job.
func NewS3SseC(key []byte) (types.Storager, error) {
	defaultPairs := s3.DefaultStoragePairs{
		Write: []types.Pair{
			// Required, must be AES256
			s3.WithServerSideEncryptionCustomerAlgorithm(s3.ServerSideEncryptionAes256),
			// Required, your AES-256 key, a 32-byte binary value
			s3.WithServerSideEncryptionCustomerKey(key),
		},
		// Now you have to provide the customer key to read encrypted data
		Read: []types.Pair{
			// Required, must be AES256
			s3.WithServerSideEncryptionCustomerAlgorithm(s3.ServerSideEncryptionAes256),
			// Required, your AES-256 key, a 32-byte binary value
			s3.WithServerSideEncryptionCustomerKey(key),
		},
	}

	return s3.NewStorager(..., s3.WithDefaultStoragePairs(defaultPairs))
}
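With the default pairs attached, the returned Storager is used exactly like an unencrypted one. A minimal usage sketch; the key value, r, w, and size are illustrative placeholders:

// The key must be exactly 32 bytes for SSE-C with AES-256 (placeholder value).
key := []byte("0123456789abcdef0123456789abcdef")

store, err := NewS3SseC(key)
if err != nil {
	log.Fatalf("init failed: %v", err)
}

// Writes and reads are transparently encrypted and decrypted with the customer key.
n, err := store.Write("secret.txt", r, size)
_, err = store.Read("secret.txt", w)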