From a58eb76b6b6c0f98d0717f6a49dc3e3379923cae Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Tue, 20 Nov 2018 13:33:57 +0800 Subject: [PATCH 01/14] refactor issue indexer --- models/issue.go | 1 - models/issue_comment.go | 10 + models/issue_indexer.go | 160 +++++++------- modules/indexer/issues/bleve.go | 220 ++++++++++++++++++++ modules/indexer/issues/bleve_test.go | 11 + modules/indexer/issues/indexer.go | 33 +++ modules/indexer/issues/queue.go | 11 + modules/indexer/issues/queue_channel.go | 54 +++++ modules/indexer/issues/queue_ledis_local.go | 93 +++++++++ modules/notification/base/notifier.go | 4 +- modules/notification/base/null.go | 4 +- modules/notification/indexer/indexer.go | 20 +- modules/notification/notification.go | 8 +- routers/api/v1/repo/issue.go | 3 +- routers/api/v1/repo/issue_comment.go | 4 +- routers/repo/issue.go | 11 +- 16 files changed, 539 insertions(+), 108 deletions(-) create mode 100644 modules/indexer/issues/bleve.go create mode 100644 modules/indexer/issues/bleve_test.go create mode 100644 modules/indexer/issues/indexer.go create mode 100644 modules/indexer/issues/queue.go create mode 100644 modules/indexer/issues/queue_channel.go create mode 100644 modules/indexer/issues/queue_ledis_local.go diff --git a/models/issue.go b/models/issue.go index 1421b28da267..a78bddd0dfc4 100644 --- a/models/issue.go +++ b/models/issue.go @@ -677,7 +677,6 @@ func updateIssueCols(e Engine, issue *Issue, cols ...string) error { if _, err := e.ID(issue.ID).Cols(cols...).Update(issue); err != nil { return err } - UpdateIssueIndexerCols(issue.ID, cols...) return nil } diff --git a/models/issue_comment.go b/models/issue_comment.go index 05756c6cf2e3..b931b21a6d82 100644 --- a/models/issue_comment.go +++ b/models/issue_comment.go @@ -1031,6 +1031,11 @@ func UpdateComment(doer *User, c *Comment, oldContent string) error { if err := c.LoadIssue(); err != nil { return err } + + if c.Type == CommentTypeComment { + UpdateIssueCommentIndexer(c, c.Issue.RepoID) + } + if err := c.Issue.LoadAttributes(); err != nil { return err } @@ -1089,6 +1094,11 @@ func DeleteComment(doer *User, comment *Comment) error { if err := comment.LoadIssue(); err != nil { return err } + + if comment.Type == CommentTypeComment { + UpdateIssueCommentIndexer(comment, comment.Issue.RepoID) + } + if err := comment.Issue.LoadAttributes(); err != nil { return err } diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 48c0b9f2466c..e12b1a494e8b 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -5,28 +5,40 @@ package models import ( - "fmt" - - "code.gitea.io/gitea/modules/indexer" + "code.gitea.io/gitea/modules/indexer/issues" "code.gitea.io/gitea/modules/log" - "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" ) -// issueIndexerUpdateQueue queue of issue ids to be updated -var issueIndexerUpdateQueue chan int64 +var ( + // issueIndexerUpdateQueue queue of issue ids to be updated + issueIndexerUpdateQueue issues.Queue + issueIndexer issues.Indexer +) // InitIssueIndexer initialize issue indexer -func InitIssueIndexer() { - indexer.InitIssueIndexer(populateIssueIndexer) - issueIndexerUpdateQueue = make(chan int64, setting.Indexer.UpdateQueueLength) - go processIssueIndexerUpdateQueue() +func InitIssueIndexer() error { + issueIndexer = issues.NewBleveIndexer() + exist, err := issueIndexer.Init() + if err != nil { + return err + } + + if !exist { + go populateIssueIndexer() + } + + // TODO: init quque via settings + issueIndexerUpdateQueue = 
issues.NewChannelQueue(issueIndexer, 20) + go issueIndexerUpdateQueue.Run() + + return nil } // populateIssueIndexer populate the issue indexer with issue data -func populateIssueIndexer() error { - batch := indexer.IssueIndexerBatch() - for page := 1; ; page++ { +func populateIssueIndexer() { + page := 1 + for { repos, _, err := SearchRepositoryByName(&SearchRepoOptions{ Page: page, PageSize: RepositoryListDefaultPageSize, @@ -35,98 +47,84 @@ func populateIssueIndexer() error { Collaborate: util.OptionalBoolFalse, }) if err != nil { - return fmt.Errorf("Repositories: %v", err) + log.Error(4, "SearchRepositoryByName: %v", err) + continue } if len(repos) == 0 { - return batch.Flush() + return } for _, repo := range repos { - issues, err := Issues(&IssuesOptions{ + is, err := Issues(&IssuesOptions{ RepoIDs: []int64{repo.ID}, IsClosed: util.OptionalBoolNone, IsPull: util.OptionalBoolNone, }) if err != nil { - return err + log.Error(4, "Issues: %v", err) + continue } - if err = IssueList(issues).LoadComments(); err != nil { - return err + if err = IssueList(is).LoadComments(); err != nil { + log.Error(4, "LoadComments: %v", err) + continue } - for _, issue := range issues { - if err := issue.update().AddToFlushingBatch(batch); err != nil { - return err + for _, issue := range is { + UpdateIssueIndexer(issue) + + for _, comment := range issue.Comments { + UpdateIssueCommentIndexer(comment, issue.RepoID) } } } } } -func processIssueIndexerUpdateQueue() { - batch := indexer.IssueIndexerBatch() - for { - var issueID int64 - select { - case issueID = <-issueIndexerUpdateQueue: - default: - // flush whatever updates we currently have, since we - // might have to wait a while - if err := batch.Flush(); err != nil { - log.Error(4, "IssueIndexer: %v", err) - } - issueID = <-issueIndexerUpdateQueue - } - issue, err := GetIssueByID(issueID) - if err != nil { - log.Error(4, "GetIssueByID: %v", err) - } else if err = issue.update().AddToFlushingBatch(batch); err != nil { - log.Error(4, "IssueIndexer: %v", err) - } - } +// UpdateIssueIndexer add/update an issue to the issue indexer +func UpdateIssueIndexer(issue *Issue) { + issueIndexerUpdateQueue.Push(&issues.IndexerData{ + ID: issue.ID, + RepoID: issue.RepoID, + Title: issue.Title, + Content: issue.Content, + }) } -func (issue *Issue) update() indexer.IssueIndexerUpdate { - comments := make([]string, 0, 5) - for _, comment := range issue.Comments { - if comment.Type == CommentTypeComment { - comments = append(comments, comment.Content) - } - } - return indexer.IssueIndexerUpdate{ - IssueID: issue.ID, - Data: &indexer.IssueIndexerData{ - RepoID: issue.RepoID, - Title: issue.Title, - Content: issue.Content, - Comments: comments, - }, - } +// DeleteRepoIssueIndexer deletes repo's all issues indexes +func DeleteRepoIssueIndexer(repo *Repository) { + issueIndexerUpdateQueue.Push(&issues.IndexerData{ + RepoID: repo.ID, + IsDelete: true, + }) } -// updateNeededCols whether a change to the specified columns requires updating -// the issue indexer -func updateNeededCols(cols []string) bool { - for _, col := range cols { - switch col { - case "name", "content": - return true - } - } - return false +// UpdateIssueIndexer add/update an issue to the issue indexer +func UpdateIssueCommentIndexer(comment *Comment, repoID int64) { + issueIndexerUpdateQueue.Push(&issues.IndexerData{ + ID: comment.IssueID, + RepoID: repoID, + Content: comment.Content, + CommentID: comment.ID, + }) } -// UpdateIssueIndexerCols update an issue in the issue indexer, given changes -// to the 
specified columns -func UpdateIssueIndexerCols(issueID int64, cols ...string) { - updateNeededCols(cols) +// DeleteIssueCommentIndexer deletes a comment index +func DeleteIssueCommentIndexer(comment *Comment, repoID int64) { + issueIndexerUpdateQueue.Push(&issues.IndexerData{ + ID: comment.IssueID, + RepoID: repoID, + CommentID: comment.ID, + IsDelete: true, + }) } -// UpdateIssueIndexer add/update an issue to the issue indexer -func UpdateIssueIndexer(issueID int64) { - select { - case issueIndexerUpdateQueue <- issueID: - default: - go func() { - issueIndexerUpdateQueue <- issueID - }() +// SearchIssuesByKeyword search issue ids by keywords and repo id +func SearchIssuesByKeyword(keyword string, repoID int64) ([]int64, error) { + var issueIDs []int64 + res, err := issueIndexer.Search(keyword, repoID, 1000, 0) + if err != nil { + return nil, err + } + for _, r := range res.Hits { + issueIDs = append(issueIDs, r.ID) } + return issueIDs, nil } diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go new file mode 100644 index 000000000000..c02cb2b9e308 --- /dev/null +++ b/modules/indexer/issues/bleve.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package issues + +import ( + "fmt" + "os" + "strconv" + + "code.gitea.io/gitea/modules/setting" + + "github.com/blevesearch/bleve" + "github.com/blevesearch/bleve/analysis/analyzer/custom" + "github.com/blevesearch/bleve/analysis/token/lowercase" + "github.com/blevesearch/bleve/analysis/token/unicodenorm" + "github.com/blevesearch/bleve/analysis/tokenizer/unicode" + "github.com/blevesearch/bleve/index/upsidedown" + "github.com/blevesearch/bleve/mapping" + "github.com/blevesearch/bleve/search/query" + "github.com/ethantkoenig/rupture" +) + +const ( + issueIndexerAnalyzer = "issueIndexer" + issueIndexerDocType = "issueIndexerDocType" + + issueIndexerLatestVersion = 1 +) + +// indexerID a bleve-compatible unique identifier for an integer id +func indexerID(id int64) string { + return strconv.FormatInt(id, 36) +} + +// idOfIndexerID the integer id associated with an indexer id +func idOfIndexerID(indexerID string) (int64, error) { + id, err := strconv.ParseInt(indexerID, 36, 64) + if err != nil { + return 0, fmt.Errorf("Unexpected indexer ID %s: %v", indexerID, err) + } + return id, nil +} + +// numericEqualityQuery a numeric equality query for the given value and field +func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery { + f := float64(value) + tru := true + q := bleve.NewNumericRangeInclusiveQuery(&f, &f, &tru, &tru) + q.SetField(field) + return q +} + +func newMatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery { + q := bleve.NewMatchPhraseQuery(matchPhrase) + q.FieldVal = field + q.Analyzer = analyzer + return q +} + +const unicodeNormalizeName = "unicodeNormalize" + +func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error { + return m.AddCustomTokenFilter(unicodeNormalizeName, map[string]interface{}{ + "type": unicodenorm.Name, + "form": unicodenorm.NFC, + }) +} + +const maxBatchSize = 16 + +// openIndexer open the index at the specified path, checking for metadata +// updates and bleve version updates. 
If index needs to be created (or +// re-created), returns (nil, nil) +func openIndexer(path string, latestVersion int) (bleve.Index, error) { + _, err := os.Stat(setting.Indexer.IssuePath) + if err != nil && os.IsNotExist(err) { + return nil, nil + } else if err != nil { + return nil, err + } + + metadata, err := rupture.ReadIndexMetadata(path) + if err != nil { + return nil, err + } + if metadata.Version < latestVersion { + // the indexer is using a previous version, so we should delete it and + // re-populate + return nil, os.RemoveAll(path) + } + + index, err := bleve.Open(path) + if err != nil && err == upsidedown.IncompatibleVersion { + // the indexer was built with a previous version of bleve, so we should + // delete it and re-populate + return nil, os.RemoveAll(path) + } else if err != nil { + return nil, err + } + return index, nil +} + +// IssueIndexerUpdate an update to the issue indexer +type BleveIndexerData IndexerData + +// Type returns the document type, for bleve's mapping.Classifier interface. +func (i *BleveIndexerData) Type() string { + return issueIndexerDocType +} + +// createIssueIndexer create an issue indexer if one does not already exist +func createIssueIndexer() (bleve.Index, error) { + mapping := bleve.NewIndexMapping() + docMapping := bleve.NewDocumentMapping() + + numericFieldMapping := bleve.NewNumericFieldMapping() + numericFieldMapping.IncludeInAll = false + docMapping.AddFieldMappingsAt("RepoID", numericFieldMapping) + + textFieldMapping := bleve.NewTextFieldMapping() + textFieldMapping.Store = false + textFieldMapping.IncludeInAll = false + docMapping.AddFieldMappingsAt("Title", textFieldMapping) + docMapping.AddFieldMappingsAt("Content", textFieldMapping) + docMapping.AddFieldMappingsAt("Comments", textFieldMapping) + + if err := addUnicodeNormalizeTokenFilter(mapping); err != nil { + return nil, err + } else if err = mapping.AddCustomAnalyzer(issueIndexerAnalyzer, map[string]interface{}{ + "type": custom.Name, + "char_filters": []string{}, + "tokenizer": unicode.Name, + "token_filters": []string{unicodeNormalizeName, lowercase.Name}, + }); err != nil { + return nil, err + } + + mapping.DefaultAnalyzer = issueIndexerAnalyzer + mapping.AddDocumentMapping(issueIndexerDocType, docMapping) + mapping.AddDocumentMapping("_all", bleve.NewDocumentDisabledMapping()) + + return bleve.New(setting.Indexer.IssuePath, mapping) +} + +var ( + _ Indexer = &BleveIndexer{} +) + +// BleveIndexer implements Indexer interface +type BleveIndexer struct { + indexer bleve.Index +} + +// NewBleveIndexer creates a new bleve local indexer +func NewBleveIndexer() *BleveIndexer { + return &BleveIndexer{} +} + +// IssueIndexerBatch batch to add updates to +func (b *BleveIndexer) IssueIndexerBatch() rupture.FlushingBatch { + return rupture.NewFlushingBatch(b.indexer, maxBatchSize) +} + +func (b *BleveIndexer) Init() (bool, error) { + var err error + b.indexer, err = openIndexer(setting.Indexer.IssuePath, issueIndexerLatestVersion) + if err != nil { + return false, err + } + if b.indexer != nil { + return true, nil + } + + b.indexer, err = createIssueIndexer() + return false, err +} + +func (b *BleveIndexer) Index(issues []*IndexerData) error { + batch := rupture.NewFlushingBatch(b.indexer, maxBatchSize) + for _, issue := range issues { + if err := batch.Index(indexerID(issue.ID), issue); err != nil { + return err + } + } + return batch.Flush() +} + +// Search searches for issues by given conditions. 
+// Returns the matching issue IDs
+func (b *BleveIndexer) Search(keyword string, repoID int64, limit, start int) (*SearchResult, error) {
+	indexerQuery := bleve.NewConjunctionQuery(
+		numericEqualityQuery(repoID, "RepoID"),
+		bleve.NewDisjunctionQuery(
+			newMatchPhraseQuery(keyword, "Title", issueIndexerAnalyzer),
+			newMatchPhraseQuery(keyword, "Content", issueIndexerAnalyzer),
+		))
+	search := bleve.NewSearchRequestOptions(indexerQuery, 2147483647, 0, false)
+
+	result, err := b.indexer.Search(search)
+	if err != nil {
+		return nil, err
+	}
+
+	var ret = SearchResult{
+		Hits: make([]Match, 0, len(result.Hits)),
+	}
+	for _, hit := range result.Hits {
+		id, err := idOfIndexerID(hit.ID)
+		if err != nil {
+			return nil, err
+		}
+		ret.Hits = append(ret.Hits, Match{
+			ID:     id,
+			RepoID: repoID,
+		})
+	}
+	return &ret, nil
+}
diff --git a/modules/indexer/issues/bleve_test.go b/modules/indexer/issues/bleve_test.go
new file mode 100644
index 000000000000..b6c1dbccfe95
--- /dev/null
+++ b/modules/indexer/issues/bleve_test.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+import "testing"
+
+func TestIndexAndSearch(t *testing.T) {
+
+}
diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go
new file mode 100644
index 000000000000..eadf72fd4dbc
--- /dev/null
+++ b/modules/indexer/issues/indexer.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+// IndexerData data stored in the issue indexer
+type IndexerData struct {
+	ID        int64
+	RepoID    int64
+	Title     string
+	Content   string
+	CommentID int64
+	IsDelete  bool `json:"-"`
+}
+
+// Match represents a search match of an issue
+type Match struct {
+	ID     int64   `json:"id"`
+	RepoID int64   `json:"repo_id"`
+	Score  float64 `json:"score"`
+}
+
+type SearchResult struct {
+	Hits []Match
+}
+
+// Indexer defines an interface to index and search issue contents
+type Indexer interface {
+	Init() (bool, error)
+	Index(issue []*IndexerData) error
+	Search(kw string, repoID int64, limit, start int) (*SearchResult, error)
+}
diff --git a/modules/indexer/issues/queue.go b/modules/indexer/issues/queue.go
new file mode 100644
index 000000000000..6f4ee4c13ae7
--- /dev/null
+++ b/modules/indexer/issues/queue.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+// Queue defines an interface for an issue indexer update queue
+type Queue interface {
+	Run() error
+	Push(*IndexerData)
+}
diff --git a/modules/indexer/issues/queue_channel.go b/modules/indexer/issues/queue_channel.go
new file mode 100644
index 000000000000..5b39e199a605
--- /dev/null
+++ b/modules/indexer/issues/queue_channel.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+import (
+	"time"
+
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// ChannelQueue implements Queue with an in-memory channel
+type ChannelQueue struct {
+	queue       chan *IndexerData
+	indexer     Indexer
+	batchNumber int
+}
+
+// NewChannelQueue creates a memory channel queue
+func NewChannelQueue(indexer Indexer, batchNumber int) *ChannelQueue {
+	return &ChannelQueue{
+		queue:       make(chan *IndexerData, setting.Indexer.UpdateQueueLength),
+		indexer:     indexer,
+		batchNumber: batchNumber,
+	}
+}
+
+func (c *ChannelQueue) Run() error {
+	var i int
+	var datas = make([]*IndexerData, 0, c.batchNumber)
+	for {
+		select {
+		case data := <-c.queue:
+			datas = append(datas, data)
+			if len(datas) >= c.batchNumber {
+				c.indexer.Index(datas)
+				// TODO: save the point
+				datas = make([]*IndexerData, 0, c.batchNumber)
+			}
+		case <-time.After(time.Millisecond * 100):
+			i++
+			if i >= 3 && len(datas) > 0 {
+				c.indexer.Index(datas)
+				// TODO: save the point
+				datas = make([]*IndexerData, 0, c.batchNumber)
+			}
+		}
+	}
+}
+
+func (c *ChannelQueue) Push(data *IndexerData) {
+	c.queue <- data
+}
diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_ledis_local.go
new file mode 100644
index 000000000000..5907ce2d80d1
--- /dev/null
+++ b/modules/indexer/issues/queue_ledis_local.go
@@ -0,0 +1,93 @@
+// Copyright 2018 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package issues
+
+import (
+	"encoding/json"
+	"time"
+
+	"code.gitea.io/gitea/modules/log"
+	"github.com/siddontang/ledisdb/config"
+	"github.com/siddontang/ledisdb/ledis"
+)
+
+var (
+	_ Queue = &LedisLocalQueue{}
+
+	ledis_local_key = []byte("ledis_local_key")
+)
+
+// LedisLocalQueue implements a local disk queue on top of ledis
+type LedisLocalQueue struct {
+	indexer     Indexer
+	ledis       *ledis.Ledis
+	db          *ledis.DB
+	batchNumber int
+}
+
+// NewLedisLocalQueue creates a ledis local queue
+func NewLedisLocalQueue(indexer Indexer, dataDir string, dbIdx, batchNumber int) (*LedisLocalQueue, error) {
+	ledis, err := ledis.Open(&config.Config{
+		DataDir: dataDir,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	db, err := ledis.Select(dbIdx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &LedisLocalQueue{
+		indexer:     indexer,
+		ledis:       ledis,
+		db:          db,
+		batchNumber: batchNumber,
+	}, nil
+}
+
+func (l *LedisLocalQueue) Run() error {
+	var i int
+	var datas = make([]*IndexerData, 0, l.batchNumber)
+	for {
+		bs, err := l.db.RPop(ledis_local_key)
+		if err != nil {
+			log.Error(4, "RPop: %v", err)
+			time.Sleep(time.Millisecond * 100)
+			continue
+		}
+
+		var data IndexerData
+		err = json.Unmarshal(bs, &data)
+		if err != nil {
+			log.Error(4, "Unmarshal: %v", err)
+			time.Sleep(time.Millisecond * 100)
+			continue
+		}
+
+		datas = append(datas, &data)
+		i++
+
+		if len(datas) > l.batchNumber || i > 3 {
+			l.indexer.Index(datas)
+			datas = make([]*IndexerData, 0, l.batchNumber)
+			i = 0
+		}
+		time.Sleep(time.Millisecond * 100)
+	}
+}
+
+func (l *LedisLocalQueue) Push(data *IndexerData) {
+	bs, err := json.Marshal(data)
+	if err != nil {
+		log.Error(4, "Marshal: %v", err)
+		return
+	}
+	_, err = l.db.LPush(ledis_local_key, bs)
+	if err != nil {
+		log.Error(4, "LPush: %v", err)
+	}
+}
diff --git a/modules/notification/base/notifier.go b/modules/notification/base/notifier.go
index bac90f5bb1d2..2e127293c4be 100644
--- a/modules/notification/base/notifier.go
+++ b/modules/notification/base/notifier.go
@@ -34,8 +34,8 @@ type Notifier interface {
NotifyCreateIssueComment(*models.User, *models.Repository, *models.Issue, *models.Comment) - NotifyUpdateComment(*models.User, *models.Comment, string) - NotifyDeleteComment(*models.User, *models.Comment) + NotifyUpdateComment(*models.User, *models.Comment, int64, string) + NotifyDeleteComment(*models.User, *models.Comment, int64) NotifyNewRelease(rel *models.Release) NotifyUpdateRelease(doer *models.User, rel *models.Release) diff --git a/modules/notification/base/null.go b/modules/notification/base/null.go index 608bd0dcaae7..33c48c077c95 100644 --- a/modules/notification/base/null.go +++ b/modules/notification/base/null.go @@ -47,11 +47,11 @@ func (*NullNotifier) NotifyMergePullRequest(pr *models.PullRequest, doer *models } // NotifyUpdateComment places a place holder function -func (*NullNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { +func (*NullNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { } // NotifyDeleteComment places a place holder function -func (*NullNotifier) NotifyDeleteComment(doer *models.User, c *models.Comment) { +func (*NullNotifier) NotifyDeleteComment(doer *models.User, c *models.Comment, repoID int64) { } // NotifyDeleteRepository places a place holder function diff --git a/modules/notification/indexer/indexer.go b/modules/notification/indexer/indexer.go index 3fd33521889a..6fb36e81adb7 100644 --- a/modules/notification/indexer/indexer.go +++ b/modules/notification/indexer/indexer.go @@ -25,38 +25,38 @@ func NewNotifier() base.Notifier { func (r *indexerNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.Repository, issue *models.Issue, comment *models.Comment) { if comment.Type == models.CommentTypeComment { - models.UpdateIssueIndexer(issue.ID) + models.UpdateIssueCommentIndexer(comment, issue.RepoID) } } func (r *indexerNotifier) NotifyNewIssue(issue *models.Issue) { - models.UpdateIssueIndexer(issue.ID) + models.UpdateIssueIndexer(issue) } func (r *indexerNotifier) NotifyNewPullRequest(pr *models.PullRequest) { - models.UpdateIssueIndexer(pr.Issue.ID) + models.UpdateIssueIndexer(pr.Issue) } -func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { +func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { if c.Type == models.CommentTypeComment { - models.UpdateIssueIndexer(c.IssueID) + models.UpdateIssueCommentIndexer(c, repoID) } } -func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models.Comment) { +func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models.Comment, repoID int64) { if comment.Type == models.CommentTypeComment { - models.UpdateIssueIndexer(comment.IssueID) + models.DeleteIssueCommentIndexer(comment, repoID) } } func (r *indexerNotifier) NotifyDeleteRepository(doer *models.User, repo *models.Repository) { - models.DeleteRepoFromIndexer(repo) + models.DeleteRepoIssueIndexer(repo) } func (r *indexerNotifier) NotifyIssueChangeContent(doer *models.User, issue *models.Issue, oldContent string) { - models.UpdateIssueIndexer(issue.ID) + models.UpdateIssueIndexer(issue) } func (r *indexerNotifier) NotifyIssueChangeTitle(doer *models.User, issue *models.Issue, oldTitle string) { - models.UpdateIssueIndexer(issue.ID) + models.UpdateIssueIndexer(issue) } diff --git a/modules/notification/notification.go b/modules/notification/notification.go index e38c36f7dd1e..f0e160385eb9 100644 --- 
a/modules/notification/notification.go +++ b/modules/notification/notification.go @@ -73,16 +73,16 @@ func NotifyPullRequestReview(pr *models.PullRequest, review *models.Review, comm } // NotifyUpdateComment notifies update comment to notifiers -func NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { +func NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { for _, notifier := range notifiers { - notifier.NotifyUpdateComment(doer, c, oldContent) + notifier.NotifyUpdateComment(doer, c, repoID, oldContent) } } // NotifyDeleteComment notifies delete comment to notifiers -func NotifyDeleteComment(doer *models.User, c *models.Comment) { +func NotifyDeleteComment(doer *models.User, c *models.Comment, repoID int64) { for _, notifier := range notifiers { - notifier.NotifyDeleteComment(doer, c) + notifier.NotifyDeleteComment(doer, c, repoID) } } diff --git a/routers/api/v1/repo/issue.go b/routers/api/v1/repo/issue.go index d339d8f0b771..0a5d4032c0a5 100644 --- a/routers/api/v1/repo/issue.go +++ b/routers/api/v1/repo/issue.go @@ -13,7 +13,6 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" - "code.gitea.io/gitea/modules/indexer" "code.gitea.io/gitea/modules/notification" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" @@ -78,7 +77,7 @@ func ListIssues(ctx *context.APIContext) { var labelIDs []int64 var err error if len(keyword) > 0 { - issueIDs, err = indexer.SearchIssuesByKeyword(ctx.Repo.Repository.ID, keyword) + issueIDs, err = models.SearchIssuesByKeyword(keyword, ctx.Repo.Repository.ID) } if splitted := strings.Split(ctx.Query("labels"), ","); len(splitted) > 0 { diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go index 720513f00720..930157281952 100644 --- a/routers/api/v1/repo/issue_comment.go +++ b/routers/api/v1/repo/issue_comment.go @@ -284,7 +284,7 @@ func editIssueComment(ctx *context.APIContext, form api.EditIssueCommentOption) return } - notification.NotifyUpdateComment(ctx.User, comment, oldContent) + notification.NotifyUpdateComment(ctx.User, comment, ctx.Repo.Repository.ID, oldContent) ctx.JSON(200, comment.APIFormat()) } @@ -375,7 +375,7 @@ func deleteIssueComment(ctx *context.APIContext) { return } - notification.NotifyDeleteComment(ctx.User, comment) + notification.NotifyDeleteComment(ctx.User, comment, ctx.Repo.Repository.ID) ctx.Status(204) } diff --git a/routers/repo/issue.go b/routers/repo/issue.go index 9767d1113694..19fcce5e0f1e 100644 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -23,7 +23,6 @@ import ( "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/base" "code.gitea.io/gitea/modules/context" - "code.gitea.io/gitea/modules/indexer" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/markup/markdown" "code.gitea.io/gitea/modules/notification" @@ -130,7 +129,11 @@ func issues(ctx *context.Context, milestoneID int64, isPullOption util.OptionalB var issueIDs []int64 if len(keyword) > 0 { - issueIDs, err = indexer.SearchIssuesByKeyword(repo.ID, keyword) + issueIDs, err = models.SearchIssuesByKeyword(keyword, repo.ID) + if err != nil { + ctx.ServerError("issueIndexer.Search", err) + return + } if len(issueIDs) == 0 { forceEmpty = true } @@ -1256,7 +1259,7 @@ func UpdateCommentContent(ctx *context.Context) { return } - notification.NotifyUpdateComment(ctx.User, comment, oldContent) + notification.NotifyUpdateComment(ctx.User, comment, comment.Issue.RepoID, oldContent) ctx.JSON(200, 
map[string]interface{}{ "content": string(markdown.Render([]byte(comment.Content), ctx.Query("context"), ctx.Repo.Repository.ComposeMetas())), @@ -1289,7 +1292,7 @@ func DeleteComment(ctx *context.Context) { return } - notification.NotifyDeleteComment(ctx.User, comment) + notification.NotifyDeleteComment(ctx.User, comment, comment.Issue.RepoID) ctx.Status(200) } From f62f4029d7d799fe88b45e7a41b1be1022170941 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Tue, 20 Nov 2018 22:33:33 +0800 Subject: [PATCH 02/14] improve bleve index --- models/issue_comment.go | 10 ++++-- models/issue_indexer.go | 40 +++++++----------------- modules/indexer/issues/bleve.go | 41 ++++++++++++++++--------- modules/indexer/issues/bleve_test.go | 26 +++++++++++++++- modules/indexer/issues/indexer.go | 12 ++++---- modules/notification/indexer/indexer.go | 6 ++-- 6 files changed, 79 insertions(+), 56 deletions(-) diff --git a/models/issue_comment.go b/models/issue_comment.go index b931b21a6d82..161ef06521a0 100644 --- a/models/issue_comment.go +++ b/models/issue_comment.go @@ -1033,7 +1033,10 @@ func UpdateComment(doer *User, c *Comment, oldContent string) error { } if c.Type == CommentTypeComment { - UpdateIssueCommentIndexer(c, c.Issue.RepoID) + if err := c.Issue.loadComments(x); err != nil { + return err + } + UpdateIssueIndexer(c.Issue) } if err := c.Issue.LoadAttributes(); err != nil { @@ -1096,7 +1099,10 @@ func DeleteComment(doer *User, comment *Comment) error { } if comment.Type == CommentTypeComment { - UpdateIssueCommentIndexer(comment, comment.Issue.RepoID) + if err := comment.Issue.loadComments(x); err != nil { + return err + } + UpdateIssueIndexer(comment.Issue) } if err := comment.Issue.LoadAttributes(); err != nil { diff --git a/models/issue_indexer.go b/models/issue_indexer.go index e12b1a494e8b..43303321e122 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -7,6 +7,7 @@ package models import ( "code.gitea.io/gitea/modules/indexer/issues" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" ) @@ -18,7 +19,7 @@ var ( // InitIssueIndexer initialize issue indexer func InitIssueIndexer() error { - issueIndexer = issues.NewBleveIndexer() + issueIndexer = issues.NewBleveIndexer(setting.Indexer.IssuePath) exist, err := issueIndexer.Init() if err != nil { return err @@ -69,10 +70,6 @@ func populateIssueIndexer() { } for _, issue := range is { UpdateIssueIndexer(issue) - - for _, comment := range issue.Comments { - UpdateIssueCommentIndexer(comment, issue.RepoID) - } } } } @@ -80,11 +77,16 @@ func populateIssueIndexer() { // UpdateIssueIndexer add/update an issue to the issue indexer func UpdateIssueIndexer(issue *Issue) { + var comments []string + for _, comment := range issue.Comments { + comments = append(comments, comment.Content) + } issueIndexerUpdateQueue.Push(&issues.IndexerData{ - ID: issue.ID, - RepoID: issue.RepoID, - Title: issue.Title, - Content: issue.Content, + ID: issue.ID, + RepoID: issue.RepoID, + Title: issue.Title, + Content: issue.Content, + Comments: comments, }) } @@ -96,26 +98,6 @@ func DeleteRepoIssueIndexer(repo *Repository) { }) } -// UpdateIssueIndexer add/update an issue to the issue indexer -func UpdateIssueCommentIndexer(comment *Comment, repoID int64) { - issueIndexerUpdateQueue.Push(&issues.IndexerData{ - ID: comment.IssueID, - RepoID: repoID, - Content: comment.Content, - CommentID: comment.ID, - }) -} - -// DeleteIssueCommentIndexer deletes a comment index -func DeleteIssueCommentIndexer(comment 
*Comment, repoID int64) { - issueIndexerUpdateQueue.Push(&issues.IndexerData{ - ID: comment.IssueID, - RepoID: repoID, - CommentID: comment.ID, - IsDelete: true, - }) -} - // SearchIssuesByKeyword search issue ids by keywords and repo id func SearchIssuesByKeyword(keyword string, repoID int64) ([]int64, error) { var issueIDs []int64 diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go index c02cb2b9e308..3f0d1a14563f 100644 --- a/modules/indexer/issues/bleve.go +++ b/modules/indexer/issues/bleve.go @@ -9,8 +9,6 @@ import ( "os" "strconv" - "code.gitea.io/gitea/modules/setting" - "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/analysis/analyzer/custom" "github.com/blevesearch/bleve/analysis/token/lowercase" @@ -23,9 +21,8 @@ import ( ) const ( - issueIndexerAnalyzer = "issueIndexer" - issueIndexerDocType = "issueIndexerDocType" - + issueIndexerAnalyzer = "issueIndexer" + issueIndexerDocType = "issueIndexerDocType" issueIndexerLatestVersion = 1 ) @@ -74,7 +71,7 @@ const maxBatchSize = 16 // updates and bleve version updates. If index needs to be created (or // re-created), returns (nil, nil) func openIndexer(path string, latestVersion int) (bleve.Index, error) { - _, err := os.Stat(setting.Indexer.IssuePath) + _, err := os.Stat(path) if err != nil && os.IsNotExist(err) { return nil, nil } else if err != nil { @@ -111,7 +108,7 @@ func (i *BleveIndexerData) Type() string { } // createIssueIndexer create an issue indexer if one does not already exist -func createIssueIndexer() (bleve.Index, error) { +func createIssueIndexer(path string) (bleve.Index, error) { mapping := bleve.NewIndexMapping() docMapping := bleve.NewDocumentMapping() @@ -141,7 +138,7 @@ func createIssueIndexer() (bleve.Index, error) { mapping.AddDocumentMapping(issueIndexerDocType, docMapping) mapping.AddDocumentMapping("_all", bleve.NewDocumentDisabledMapping()) - return bleve.New(setting.Indexer.IssuePath, mapping) + return bleve.New(path, mapping) } var ( @@ -150,12 +147,15 @@ var ( // BleveIndexer implements Indexer interface type BleveIndexer struct { - indexer bleve.Index + indexDir string + indexer bleve.Index } // NewBleveIndexer creates a new bleve local indexer -func NewBleveIndexer() *BleveIndexer { - return &BleveIndexer{} +func NewBleveIndexer(indexDir string) *BleveIndexer { + return &BleveIndexer{ + indexDir: indexDir, + } } // IssueIndexerBatch batch to add updates to @@ -165,7 +165,7 @@ func (b *BleveIndexer) IssueIndexerBatch() rupture.FlushingBatch { func (b *BleveIndexer) Init() (bool, error) { var err error - b.indexer, err = openIndexer(setting.Indexer.IssuePath, issueIndexerLatestVersion) + b.indexer, err = openIndexer(b.indexDir, issueIndexerLatestVersion) if err != nil { return false, err } @@ -173,14 +173,24 @@ func (b *BleveIndexer) Init() (bool, error) { return true, nil } - b.indexer, err = createIssueIndexer() + b.indexer, err = createIssueIndexer(b.indexDir) return false, err } func (b *BleveIndexer) Index(issues []*IndexerData) error { batch := rupture.NewFlushingBatch(b.indexer, maxBatchSize) for _, issue := range issues { - if err := batch.Index(indexerID(issue.ID), issue); err != nil { + if err := batch.Index(indexerID(issue.ID), struct { + RepoID int64 + Title string + Content string + Comments []string + }{ + RepoID: issue.RepoID, + Title: issue.Title, + Content: issue.Content, + Comments: issue.Comments, + }); err != nil { return err } } @@ -195,8 +205,9 @@ func (b *BleveIndexer) Search(keyword string, repoID int64, limit, start int) (* 
bleve.NewDisjunctionQuery(
 			newMatchPhraseQuery(keyword, "Title", issueIndexerAnalyzer),
 			newMatchPhraseQuery(keyword, "Content", issueIndexerAnalyzer),
+			newMatchPhraseQuery(keyword, "Comments", issueIndexerAnalyzer),
 		))
-	search := bleve.NewSearchRequestOptions(indexerQuery, 2147483647, 0, false)
+	search := bleve.NewSearchRequestOptions(indexerQuery, limit, start, false)
 
 	result, err := b.indexer.Search(search)
 	if err != nil {
diff --git a/modules/indexer/issues/bleve_test.go b/modules/indexer/issues/bleve_test.go
index b6c1dbccfe95..c85950529eba 100644
--- a/modules/indexer/issues/bleve_test.go
+++ b/modules/indexer/issues/bleve_test.go
@@ -4,8 +4,32 @@
 
 package issues
 
-import "testing"
+import (
+	"testing"
+
+	"code.gitea.io/gitea/modules/setting"
+	"github.com/stretchr/testify/assert"
+)
 
 func TestIndexAndSearch(t *testing.T) {
+	indexer := NewBleveIndexer(setting.Indexer.IssuePath)
+	_, err := indexer.Init()
+	assert.NoError(t, err)
+
+	err = indexer.Index([]*IndexerData{
+		{
+			ID:      1,
+			RepoID:  2,
+			Title:   "Issue search should support Chinese",
+			Content: "As title",
+		},
+	})
+	assert.NoError(t, err)
+
+	res, err := indexer.Search("search", 2, 10, 0)
+	assert.NoError(t, err)
 
+	for _, hit := range res.Hits {
+		assert.EqualValues(t, 1, hit.ID)
+	}
 }
diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go
index eadf72fd4dbc..7954197d15f3 100644
--- a/modules/indexer/issues/indexer.go
+++ b/modules/indexer/issues/indexer.go
@@ -6,12 +6,12 @@ package issues
 
 // IndexerData data stored in the issue indexer
 type IndexerData struct {
-	ID        int64
-	RepoID    int64
-	Title     string
-	Content   string
-	CommentID int64
-	IsDelete  bool `json:"-"`
+	ID       int64
+	RepoID   int64
+	Title    string
+	Content  string
+	Comments []string
+	IsDelete bool `json:"-"`
 }
 
 // Match represents a search match of an issue
diff --git a/modules/notification/indexer/indexer.go b/modules/notification/indexer/indexer.go
index 6fb36e81adb7..561b713bac45 100644
--- a/modules/notification/indexer/indexer.go
+++ b/modules/notification/indexer/indexer.go
@@ -25,7 +25,7 @@ func NewNotifier() base.Notifier {
 
 func (r *indexerNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.Repository,
 	issue *models.Issue, comment *models.Comment) {
 	if comment.Type == models.CommentTypeComment {
-		models.UpdateIssueCommentIndexer(comment, issue.RepoID)
+		models.UpdateIssueIndexer(issue)
 	}
 }
 
@@ -39,13 +39,13 @@ func (r *indexerNotifier) NotifyNewPullRequest(pr *models.PullRequest) {
 
 func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) {
 	if c.Type == models.CommentTypeComment {
-		models.UpdateIssueCommentIndexer(c, repoID)
+		models.UpdateIssueIndexer(c.Issue)
 	}
 }
 
 func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models.Comment, repoID int64) {
 	if comment.Type == models.CommentTypeComment {
-		models.DeleteIssueCommentIndexer(comment, repoID)
+		models.UpdateIssueIndexer(comment.Issue)
 	}
 }

From 961318b6e415f63b9344cc170a7b13738a2e70e4 Mon Sep 17 00:00:00 2001
From: Lunny Xiao
Date: Wed, 21 Nov 2018 11:36:56 +0800
Subject: [PATCH 03/14] indexer queue will init according to settings and fix
 some tests
---
 models/issue_indexer.go | 27 +++++++--
 models/issue_list.go | 16 ++++-
 models/models.go | 14 -----
 modules/indexer/issues/bleve.go | 5 --
 modules/indexer/issues/bleve_test.go | 65 +++++++++++++++++++--
 modules/indexer/issues/queue_ledis_local.go | 7 +++
 modules/setting/setting.go | 39 +++++++++++--
 7 files changed, 136 insertions(+), 37 deletions(-)

diff --git 
a/models/issue_indexer.go b/models/issue_indexer.go index 43303321e122..29bdaf37f1fc 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -5,6 +5,8 @@ package models import ( + "fmt" + "code.gitea.io/gitea/modules/indexer/issues" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/setting" @@ -29,8 +31,22 @@ func InitIssueIndexer() error { go populateIssueIndexer() } - // TODO: init quque via settings - issueIndexerUpdateQueue = issues.NewChannelQueue(issueIndexer, 20) + switch setting.Indexer.IssueIndexerQueueType { + case setting.LedisLocalQueueType: + issueIndexerUpdateQueue, err = issues.NewLedisLocalQueue( + issueIndexer, + setting.Indexer.IssueIndexerQueueDir, + setting.Indexer.IssueIndexerQueueDBIndex, + setting.Indexer.IssueIndexerQueueBatchNumber) + if err != nil { + return err + } + case setting.ChannelQueueType: + issueIndexerUpdateQueue = issues.NewChannelQueue(issueIndexer, setting.Indexer.IssueIndexerQueueBatchNumber) + default: + return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueIndexerQueueType) + } + go issueIndexerUpdateQueue.Run() return nil @@ -54,6 +70,7 @@ func populateIssueIndexer() { if len(repos) == 0 { return } + page++ for _, repo := range repos { is, err := Issues(&IssuesOptions{ RepoIDs: []int64{repo.ID}, @@ -64,7 +81,7 @@ func populateIssueIndexer() { log.Error(4, "Issues: %v", err) continue } - if err = IssueList(is).LoadComments(); err != nil { + if err = IssueList(is).LoadDiscussComments(); err != nil { log.Error(4, "LoadComments: %v", err) continue } @@ -79,7 +96,9 @@ func populateIssueIndexer() { func UpdateIssueIndexer(issue *Issue) { var comments []string for _, comment := range issue.Comments { - comments = append(comments, comment.Content) + if comment.Type == CommentTypeComment { + comments = append(comments, comment.Content) + } } issueIndexerUpdateQueue.Push(&issues.IndexerData{ ID: issue.ID, diff --git a/models/issue_list.go b/models/issue_list.go index 7e4c26464385..a1aab488fcb9 100644 --- a/models/issue_list.go +++ b/models/issue_list.go @@ -4,7 +4,11 @@ package models -import "fmt" +import ( + "fmt" + + "github.com/go-xorm/builder" +) // IssueList defines a list of issues type IssueList []*Issue @@ -338,7 +342,7 @@ func (issues IssueList) loadAttachments(e Engine) (err error) { return nil } -func (issues IssueList) loadComments(e Engine) (err error) { +func (issues IssueList) loadComments(e Engine, cond builder.Cond) (err error) { if len(issues) == 0 { return nil } @@ -354,6 +358,7 @@ func (issues IssueList) loadComments(e Engine) (err error) { rows, err := e.Table("comment"). Join("INNER", "issue", "issue.id = comment.issue_id"). In("issue.id", issuesIDs[:limit]). + Where(cond). 
Rows(new(Comment)) if err != nil { return err @@ -479,5 +484,10 @@ func (issues IssueList) LoadAttachments() error { // LoadComments loads comments func (issues IssueList) LoadComments() error { - return issues.loadComments(x) + return issues.loadComments(x, builder.NewCond()) +} + +// LoadDiscussComments loads discuss comments +func (issues IssueList) LoadDiscussComments() error { + return issues.loadComments(x, builder.Eq{"comment.type": CommentTypeComment}) } diff --git a/models/models.go b/models/models.go index daef7c07e820..b8fe588b5a43 100644 --- a/models/models.go +++ b/models/models.go @@ -12,7 +12,6 @@ import ( "net/url" "os" "path" - "path/filepath" "strings" "code.gitea.io/gitea/modules/log" @@ -158,19 +157,6 @@ func LoadConfigs() { DbCfg.SSLMode = sec.Key("SSL_MODE").MustString("disable") DbCfg.Path = sec.Key("PATH").MustString("data/gitea.db") DbCfg.Timeout = sec.Key("SQLITE_TIMEOUT").MustInt(500) - - sec = setting.Cfg.Section("indexer") - setting.Indexer.IssuePath = sec.Key("ISSUE_INDEXER_PATH").MustString(path.Join(setting.AppDataPath, "indexers/issues.bleve")) - if !filepath.IsAbs(setting.Indexer.IssuePath) { - setting.Indexer.IssuePath = path.Join(setting.AppWorkPath, setting.Indexer.IssuePath) - } - setting.Indexer.RepoIndexerEnabled = sec.Key("REPO_INDEXER_ENABLED").MustBool(false) - setting.Indexer.RepoPath = sec.Key("REPO_INDEXER_PATH").MustString(path.Join(setting.AppDataPath, "indexers/repos.bleve")) - if !filepath.IsAbs(setting.Indexer.RepoPath) { - setting.Indexer.RepoPath = path.Join(setting.AppWorkPath, setting.Indexer.RepoPath) - } - setting.Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20) - setting.Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024) } // parsePostgreSQLHostPort parses given input in various forms defined in diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go index 3f0d1a14563f..b40274f96eff 100644 --- a/modules/indexer/issues/bleve.go +++ b/modules/indexer/issues/bleve.go @@ -158,11 +158,6 @@ func NewBleveIndexer(indexDir string) *BleveIndexer { } } -// IssueIndexerBatch batch to add updates to -func (b *BleveIndexer) IssueIndexerBatch() rupture.FlushingBatch { - return rupture.NewFlushingBatch(b.indexer, maxBatchSize) -} - func (b *BleveIndexer) Init() (bool, error) { var err error b.indexer, err = openIndexer(b.indexDir, issueIndexerLatestVersion) diff --git a/modules/indexer/issues/bleve_test.go b/modules/indexer/issues/bleve_test.go index c85950529eba..720266e3b5bf 100644 --- a/modules/indexer/issues/bleve_test.go +++ b/modules/indexer/issues/bleve_test.go @@ -5,14 +5,17 @@ package issues import ( + "os" "testing" - "code.gitea.io/gitea/modules/setting" "github.com/stretchr/testify/assert" ) func TestIndexAndSearch(t *testing.T) { - indexer := NewBleveIndexer(setting.Indexer.IssuePath) + dir := "./bleve.index" + indexer := NewBleveIndexer(dir) + defer os.RemoveAll(dir) + _, err := indexer.Init() assert.NoError(t, err) @@ -22,14 +25,64 @@ func TestIndexAndSearch(t *testing.T) { RepoID: 2, Title: "Issue search should support Chinese", Content: "As title", + Comments: []string{ + "test1", + "test2", + }, + }, + { + ID: 2, + RepoID: 2, + Title: "CJK support could be optional", + Content: "Chinese Korean and Japanese should be supported but I would like it's not enabled by default", + Comments: []string{ + "LGTM", + "Good idea", + }, }, }) assert.NoError(t, err) - res, err := indexer.Search("search", 2, 10, 0) - assert.NoError(t, err) + var ( + keywords = []struct { + 
Keyword string + IDs []int64 + }{ + { + Keyword: "search", + IDs: []int64{1}, + }, + { + Keyword: "test1", + IDs: []int64{1}, + }, + { + Keyword: "test2", + IDs: []int64{1}, + }, + { + Keyword: "support", + IDs: []int64{1, 2}, + }, + { + Keyword: "chinese", + IDs: []int64{1, 2}, + }, + { + Keyword: "help", + IDs: []int64{}, + }, + } + ) + + for _, kw := range keywords { + res, err := indexer.Search(kw.Keyword, 2, 10, 0) + assert.NoError(t, err) - for _, hit := range res.Hits { - assert.EqualValues(t, 1, hit.ID) + var ids = make([]int64, 0, len(res.Hits)) + for _, hit := range res.Hits { + ids = append(ids, hit.ID) + } + assert.EqualValues(t, kw.IDs, ids) } } diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_ledis_local.go index 5907ce2d80d1..012fbe74b8b5 100644 --- a/modules/indexer/issues/queue_ledis_local.go +++ b/modules/indexer/issues/queue_ledis_local.go @@ -60,6 +60,11 @@ func (l *LedisLocalQueue) Run() error { continue } + if len(bs) <= 0 { + time.Sleep(time.Millisecond * 100) + continue + } + var data IndexerData err = json.Unmarshal(bs, &data) if err != nil { @@ -68,6 +73,8 @@ func (l *LedisLocalQueue) Run() error { continue } + log.Trace("LedisLocalQueue: task found: %#v", data) + datas = append(datas, &data) i++ diff --git a/modules/setting/setting.go b/modules/setting/setting.go index d3b45ec29d3d..dde656011489 100644 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -83,6 +83,12 @@ const ( ReCaptcha = "recaptcha" ) +// enumerates all the indexer queue types +const ( + LedisLocalQueueType = "ledis_local" + ChannelQueueType = "channel" +) + // settings var ( // AppVer settings @@ -181,11 +187,15 @@ var ( // Indexer settings Indexer struct { - IssuePath string - RepoIndexerEnabled bool - RepoPath string - UpdateQueueLength int - MaxIndexerFileSize int64 + IssuePath string + RepoIndexerEnabled bool + RepoPath string + UpdateQueueLength int + MaxIndexerFileSize int64 + IssueIndexerQueueType string + IssueIndexerQueueDir string + IssueIndexerQueueDBIndex int + IssueIndexerQueueBatchNumber int } // Repository settings @@ -1202,6 +1212,7 @@ func NewContext() { IsInputFile: sec.Key("IS_INPUT_FILE").MustBool(false), }) } + sec = Cfg.Section("U2F") U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/"))) U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/")) @@ -1215,6 +1226,24 @@ func NewContext() { // Explicitly disable credential helper, otherwise Git credentials might leak git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "credential.helper=") } + + sec = Cfg.Section("indexer") + Indexer.IssuePath = sec.Key("ISSUE_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/issues.bleve")) + if !filepath.IsAbs(Indexer.IssuePath) { + Indexer.IssuePath = path.Join(AppWorkPath, Indexer.IssuePath) + } + Indexer.RepoIndexerEnabled = sec.Key("REPO_INDEXER_ENABLED").MustBool(false) + Indexer.RepoPath = sec.Key("REPO_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/repos.bleve")) + if !filepath.IsAbs(Indexer.RepoPath) { + Indexer.RepoPath = path.Join(AppWorkPath, Indexer.RepoPath) + } + Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20) + Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024) + Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LedisLocalQueueType) + Indexer.IssueIndexerQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, 
"indexers/issues.queue")) + Indexer.IssueIndexerQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20) + Indexer.IssueIndexerQueueDBIndex = sec.Key("ISSUE_INDEXER_QUEUE_DB_INDEX").MustInt(0) + } // NewServices initializes the services From b3b82c2707c8f72c5a8d3b235f67f065e2d055cb Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 25 Nov 2018 20:06:42 +0800 Subject: [PATCH 04/14] fix bug --- models/issue_indexer.go | 1 - models/unit_tests.go | 4 ++++ modules/indexer/issues/queue_ledis_local.go | 16 ++++++++-------- modules/setting/setting.go | 8 +++++++- routers/init.go | 4 +++- 5 files changed, 22 insertions(+), 11 deletions(-) diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 29bdaf37f1fc..15d821c8b010 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -26,7 +26,6 @@ func InitIssueIndexer() error { if err != nil { return err } - if !exist { go populateIssueIndexer() } diff --git a/models/unit_tests.go b/models/unit_tests.go index 28cd91215edc..f87dd7ee96cd 100644 --- a/models/unit_tests.go +++ b/models/unit_tests.go @@ -44,6 +44,10 @@ func MainTest(m *testing.M, pathToGiteaRoot string) { fatalTestError("Error creating test engine: %v\n", err) } + if err = InitIssueIndexer(); err != nil { + fatalTestError("Error InitIssueIndexer: %v\n", err) + } + setting.AppURL = "https://try.gitea.io/" setting.RunUser = "runuser" setting.SSH.Port = 3000 diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_ledis_local.go index 012fbe74b8b5..1ebeda75e37a 100644 --- a/modules/indexer/issues/queue_ledis_local.go +++ b/modules/indexer/issues/queue_ledis_local.go @@ -60,6 +60,13 @@ func (l *LedisLocalQueue) Run() error { continue } + i++ + if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) { + l.indexer.Index(datas) + datas = make([]*IndexerData, 0, l.batchNumber) + i = 0 + } + if len(bs) <= 0 { time.Sleep(time.Millisecond * 100) continue @@ -76,14 +83,7 @@ func (l *LedisLocalQueue) Run() error { log.Trace("LedisLocalQueue: task found: %#v", data) datas = append(datas, &data) - i++ - - if len(datas) > l.batchNumber || i > 3 { - l.indexer.Index(datas) - datas = make([]*IndexerData, 0, l.batchNumber) - i = 0 - } - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Millisecond * 10) } } diff --git a/modules/setting/setting.go b/modules/setting/setting.go index dde656011489..de881b25805e 100644 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -186,7 +186,7 @@ var ( DBConnectBackoff time.Duration // Indexer settings - Indexer struct { + Indexer = struct { IssuePath string RepoIndexerEnabled bool RepoPath string @@ -196,6 +196,12 @@ var ( IssueIndexerQueueDir string IssueIndexerQueueDBIndex int IssueIndexerQueueBatchNumber int + }{ + IssuePath: "indexers/issues.bleve", + IssueIndexerQueueType: LedisLocalQueueType, + IssueIndexerQueueDir: "indexers/issues.queue", + IssueIndexerQueueBatchNumber: 20, + IssueIndexerQueueDBIndex: 0, } // Repository settings diff --git a/routers/init.go b/routers/init.go index 4da786cc003a..1da21a351bc3 100644 --- a/routers/init.go +++ b/routers/init.go @@ -90,7 +90,9 @@ func GlobalInit() { // Booting long running goroutines. 
cron.NewContext() - models.InitIssueIndexer() + if err := models.InitIssueIndexer(); err != nil { + log.Fatal(4, "Failed to initialize issue indexer: %v", err) + } models.InitRepoIndexer() models.InitSyncMirrors() models.InitDeliverHooks() From e2fbf785158158be7407ffc8bf5707c4149b8abe Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Mon, 26 Nov 2018 16:25:28 +0800 Subject: [PATCH 05/14] fix vendors --- Gopkg.lock | 82 ++ vendor/github.com/cupcake/rdb/LICENCE | 21 + vendor/github.com/cupcake/rdb/crc64/crc64.go | 64 + vendor/github.com/cupcake/rdb/decoder.go | 824 +++++++++++++ vendor/github.com/cupcake/rdb/encoder.go | 130 ++ .../cupcake/rdb/nopdecoder/nop_decoder.go | 24 + vendor/github.com/cupcake/rdb/slice_buffer.go | 67 ++ vendor/github.com/pelletier/go-toml/LICENSE | 21 + vendor/github.com/pelletier/go-toml/doc.go | 23 + vendor/github.com/pelletier/go-toml/fuzz.go | 31 + .../pelletier/go-toml/keysparsing.go | 85 ++ vendor/github.com/pelletier/go-toml/lexer.go | 750 ++++++++++++ .../github.com/pelletier/go-toml/marshal.go | 609 ++++++++++ vendor/github.com/pelletier/go-toml/parser.go | 430 +++++++ .../github.com/pelletier/go-toml/position.go | 29 + vendor/github.com/pelletier/go-toml/token.go | 144 +++ vendor/github.com/pelletier/go-toml/toml.go | 367 ++++++ .../pelletier/go-toml/tomltree_create.go | 142 +++ .../pelletier/go-toml/tomltree_write.go | 333 ++++++ vendor/github.com/siddontang/go/LICENSE | 20 + vendor/github.com/siddontang/go/bson/LICENSE | 25 + .../github.com/siddontang/go/filelock/LICENSE | 27 + .../go/filelock/file_lock_generic.go | 17 + .../go/filelock/file_lock_solaris.go | 43 + .../siddontang/go/filelock/file_lock_unix.go | 51 + .../go/filelock/file_lock_windows.go | 36 + vendor/github.com/siddontang/go/hack/hack.go | 27 + .../siddontang/go/ioutil2/ioutil.go | 39 + .../siddontang/go/ioutil2/sectionwriter.go | 69 ++ vendor/github.com/siddontang/go/log/doc.go | 21 + .../siddontang/go/log/filehandler.go | 221 ++++ .../github.com/siddontang/go/log/handler.go | 48 + vendor/github.com/siddontang/go/log/log.go | 343 ++++++ .../siddontang/go/log/sockethandler.go | 65 + vendor/github.com/siddontang/go/num/bytes.go | 67 ++ vendor/github.com/siddontang/go/num/cmp.go | 161 +++ vendor/github.com/siddontang/go/num/str.go | 157 +++ .../github.com/siddontang/go/snappy/LICENSE | 27 + .../github.com/siddontang/go/snappy/decode.go | 124 ++ .../github.com/siddontang/go/snappy/encode.go | 174 +++ .../github.com/siddontang/go/snappy/snappy.go | 38 + .../github.com/siddontang/go/sync2/atomic.go | 146 +++ .../siddontang/go/sync2/semaphore.go | 65 + vendor/github.com/siddontang/ledisdb/LICENSE | 21 + .../siddontang/ledisdb/config/config.go | 315 +++++ .../siddontang/ledisdb/ledis/batch.go | 139 +++ .../siddontang/ledisdb/ledis/const.go | 144 +++ .../siddontang/ledisdb/ledis/doc.go | 58 + .../siddontang/ledisdb/ledis/dump.go | 223 ++++ .../siddontang/ledisdb/ledis/event.go | 126 ++ .../siddontang/ledisdb/ledis/ledis.go | 241 ++++ .../siddontang/ledisdb/ledis/ledis_db.go | 204 ++++ .../siddontang/ledisdb/ledis/migrate.go | 189 +++ .../siddontang/ledisdb/ledis/replication.go | 250 ++++ .../siddontang/ledisdb/ledis/scan.go | 396 ++++++ .../siddontang/ledisdb/ledis/sort.go | 233 ++++ .../siddontang/ledisdb/ledis/t_hash.go | 537 +++++++++ .../siddontang/ledisdb/ledis/t_kv.go | 769 ++++++++++++ .../siddontang/ledisdb/ledis/t_list.go | 783 ++++++++++++ .../siddontang/ledisdb/ledis/t_set.go | 627 ++++++++++ .../siddontang/ledisdb/ledis/t_ttl.go | 213 ++++ .../siddontang/ledisdb/ledis/t_zset.go | 1063 
+++++++++++++++++ .../siddontang/ledisdb/ledis/util.go | 95 ++ .../siddontang/ledisdb/rpl/file_io.go | 363 ++++++ .../siddontang/ledisdb/rpl/file_store.go | 416 +++++++ .../siddontang/ledisdb/rpl/file_table.go | 571 +++++++++ .../siddontang/ledisdb/rpl/goleveldb_store.go | 225 ++++ .../github.com/siddontang/ledisdb/rpl/log.go | 167 +++ .../github.com/siddontang/ledisdb/rpl/rpl.go | 336 ++++++ .../siddontang/ledisdb/rpl/store.go | 36 + .../github.com/siddontang/ledisdb/store/db.go | 169 +++ .../siddontang/ledisdb/store/driver/driver.go | 57 + .../siddontang/ledisdb/store/driver/slice.go | 21 + .../siddontang/ledisdb/store/driver/store.go | 46 + .../ledisdb/store/goleveldb/batch.go | 39 + .../ledisdb/store/goleveldb/const.go | 4 + .../siddontang/ledisdb/store/goleveldb/db.go | 204 ++++ .../ledisdb/store/goleveldb/iterator.go | 49 + .../ledisdb/store/goleveldb/snapshot.go | 26 + .../siddontang/ledisdb/store/iterator.go | 334 ++++++ .../siddontang/ledisdb/store/leveldb/batch.go | 99 ++ .../siddontang/ledisdb/store/leveldb/cache.go | 20 + .../siddontang/ledisdb/store/leveldb/const.go | 3 + .../siddontang/ledisdb/store/leveldb/db.go | 314 +++++ .../ledisdb/store/leveldb/filterpolicy.go | 21 + .../ledisdb/store/leveldb/iterator.go | 70 ++ .../ledisdb/store/leveldb/leveldb_ext.cc | 95 ++ .../ledisdb/store/leveldb/leveldb_ext.h | 41 + .../ledisdb/store/leveldb/options.go | 126 ++ .../siddontang/ledisdb/store/leveldb/slice.go | 40 + .../ledisdb/store/leveldb/snapshot.go | 39 + .../siddontang/ledisdb/store/leveldb/util.go | 45 + .../siddontang/ledisdb/store/rocksdb/batch.go | 83 ++ .../siddontang/ledisdb/store/rocksdb/cache.go | 20 + .../siddontang/ledisdb/store/rocksdb/const.go | 3 + .../siddontang/ledisdb/store/rocksdb/db.go | 342 ++++++ .../siddontang/ledisdb/store/rocksdb/env.go | 27 + .../ledisdb/store/rocksdb/filterpolicy.go | 21 + .../ledisdb/store/rocksdb/iterator.go | 70 ++ .../ledisdb/store/rocksdb/options.go | 229 ++++ .../ledisdb/store/rocksdb/rocksdb_ext.cc | 44 + .../ledisdb/store/rocksdb/rocksdb_ext.h | 24 + .../siddontang/ledisdb/store/rocksdb/slice.go | 41 + .../ledisdb/store/rocksdb/snapshot.go | 39 + .../siddontang/ledisdb/store/rocksdb/util.go | 54 + .../siddontang/ledisdb/store/slice.go | 9 + .../siddontang/ledisdb/store/snapshot.go | 48 + .../siddontang/ledisdb/store/stat.go | 37 + .../siddontang/ledisdb/store/store.go | 62 + .../siddontang/ledisdb/store/writebatch.go | 136 +++ vendor/github.com/siddontang/rdb/LICENSE | 21 + vendor/github.com/siddontang/rdb/decode.go | 128 ++ vendor/github.com/siddontang/rdb/digest.go | 106 ++ vendor/github.com/siddontang/rdb/encode.go | 52 + vendor/github.com/siddontang/rdb/loader.go | 112 ++ vendor/github.com/siddontang/rdb/reader.go | 332 +++++ 116 files changed, 18829 insertions(+) create mode 100644 vendor/github.com/cupcake/rdb/LICENCE create mode 100644 vendor/github.com/cupcake/rdb/crc64/crc64.go create mode 100644 vendor/github.com/cupcake/rdb/decoder.go create mode 100644 vendor/github.com/cupcake/rdb/encoder.go create mode 100644 vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go create mode 100644 vendor/github.com/cupcake/rdb/slice_buffer.go create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 vendor/github.com/pelletier/go-toml/doc.go create mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 vendor/github.com/pelletier/go-toml/marshal.go 
create mode 100644 vendor/github.com/pelletier/go-toml/parser.go create mode 100644 vendor/github.com/pelletier/go-toml/position.go create mode 100644 vendor/github.com/pelletier/go-toml/token.go create mode 100644 vendor/github.com/pelletier/go-toml/toml.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 vendor/github.com/siddontang/go/LICENSE create mode 100644 vendor/github.com/siddontang/go/bson/LICENSE create mode 100644 vendor/github.com/siddontang/go/filelock/LICENSE create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_generic.go create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_solaris.go create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_unix.go create mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_windows.go create mode 100644 vendor/github.com/siddontang/go/hack/hack.go create mode 100644 vendor/github.com/siddontang/go/ioutil2/ioutil.go create mode 100644 vendor/github.com/siddontang/go/ioutil2/sectionwriter.go create mode 100644 vendor/github.com/siddontang/go/log/doc.go create mode 100644 vendor/github.com/siddontang/go/log/filehandler.go create mode 100644 vendor/github.com/siddontang/go/log/handler.go create mode 100644 vendor/github.com/siddontang/go/log/log.go create mode 100644 vendor/github.com/siddontang/go/log/sockethandler.go create mode 100644 vendor/github.com/siddontang/go/num/bytes.go create mode 100644 vendor/github.com/siddontang/go/num/cmp.go create mode 100644 vendor/github.com/siddontang/go/num/str.go create mode 100644 vendor/github.com/siddontang/go/snappy/LICENSE create mode 100644 vendor/github.com/siddontang/go/snappy/decode.go create mode 100644 vendor/github.com/siddontang/go/snappy/encode.go create mode 100644 vendor/github.com/siddontang/go/snappy/snappy.go create mode 100644 vendor/github.com/siddontang/go/sync2/atomic.go create mode 100644 vendor/github.com/siddontang/go/sync2/semaphore.go create mode 100644 vendor/github.com/siddontang/ledisdb/LICENSE create mode 100644 vendor/github.com/siddontang/ledisdb/config/config.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/batch.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/const.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/doc.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/dump.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/event.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/migrate.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/replication.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/scan.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/sort.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_hash.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_kv.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_list.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_set.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_zset.go create mode 100644 vendor/github.com/siddontang/ledisdb/ledis/util.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_io.go create 
mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_store.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_table.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/log.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/rpl.go create mode 100644 vendor/github.com/siddontang/ledisdb/rpl/store.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/db.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/driver.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/slice.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/store.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/iterator.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/const.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/db.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/options.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/util.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/slice.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/snapshot.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/stat.go create mode 100644 vendor/github.com/siddontang/ledisdb/store/store.go create mode 100644 
vendor/github.com/siddontang/ledisdb/store/writebatch.go create mode 100644 vendor/github.com/siddontang/rdb/LICENSE create mode 100644 vendor/github.com/siddontang/rdb/decode.go create mode 100644 vendor/github.com/siddontang/rdb/digest.go create mode 100644 vendor/github.com/siddontang/rdb/encode.go create mode 100644 vendor/github.com/siddontang/rdb/loader.go create mode 100644 vendor/github.com/siddontang/rdb/reader.go diff --git a/Gopkg.lock b/Gopkg.lock index 0fe028a5eadd..592d48bf29e7 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -221,6 +221,17 @@ pruneopts = "NUT" revision = "d904413d884d1fb849e2ad8834619f661761ef57" +[[projects]] + digest = "1:2e39e716a20e285bb1da0c5f6d00d7b5da0e50e527a8e5dd0258d1e0fcd1b403" + name = "github.com/cupcake/rdb" + packages = [ + ".", + "crc64", + "nopdecoder", + ] + pruneopts = "NUT" + revision = "43ba34106c765f2111c0dc7b74cdf8ee437411e0" + [[projects]] digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" @@ -733,6 +744,14 @@ revision = "c37440a7cf42ac63b919c752ca73a85067e05992" version = "v0.2.0" +[[projects]] + digest = "1:51ea800cff51752ff68e12e04106f5887b4daec6f9356721238c28019f0b42db" + name = "github.com/pelletier/go-toml" + packages = ["."] + pruneopts = "NUT" + revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" + version = "v1.2.0" + [[projects]] digest = "1:44c66ad69563dbe3f8e76d7d6cad21a03626e53f1875b5ab163ded419e01ca7a" name = "github.com/philhofer/fwd" @@ -842,6 +861,22 @@ pruneopts = "NUT" revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2" +[[projects]] + branch = "master" + digest = "1:81cd039986aace9719c68a9794fa8c9dd1007cffa1ff8995631e8ed35aacf6fe" + name = "github.com/siddontang/go" + packages = [ + "filelock", + "hack", + "ioutil2", + "log", + "num", + "snappy", + "sync2", + ] + pruneopts = "NUT" + revision = "bdc77568d726a8702315ec4eafda030b6abc4f43" + [[projects]] branch = "master" digest = "1:dbda803f21e60c38de7d9f884390f2ebbe234ce0c3d139b65bbb36b03a99d266" @@ -850,6 +885,31 @@ pruneopts = "NUT" revision = "d8f7bb82a96d89c1254e5a6c967134e1433c9ee2" +[[projects]] + digest = "1:25ac32ee449099128d3c84e4d4596749f1ba8965045bdfe4e99e1914e26b5e93" + name = "github.com/siddontang/ledisdb" + packages = [ + "config", + "ledis", + "rpl", + "store", + "store/driver", + "store/goleveldb", + "store/leveldb", + "store/rocksdb", + ] + pruneopts = "NUT" + revision = "56900470a899883f691bcdf6bea4ac547f2a9a6f" + version = "v0.6" + +[[projects]] + branch = "master" + digest = "1:7ddaee1a4c41ddf0b35191621f7849bb96889a614137356a851c5d4da491f173" + name = "github.com/siddontang/rdb" + packages = ["."] + pruneopts = "NUT" + revision = "fc89ed2e418d27e3ea76e708e54276d2b44ae9cf" + [[projects]] digest = "1:89fd77d603a74a6540d60067debad9397865bf040955d907362c95d364baeba6" name = "github.com/src-d/gcfg" @@ -879,6 +939,26 @@ revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" version = "v1.2.1" +[[projects]] + digest = "1:685fdfea42d825ebd39ee0994354b46c374cf2c2b2d97a41a8dee1807c6a9b62" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util", + ] + pruneopts = "NUT" + revision = "cfa635847112c5dc4782e128fa7e0d05fdbfb394" + [[projects]] branch = "master" digest = "1:685fdfea42d825ebd39ee0994354b46c374cf2c2b2d97a41a8dee1807c6a9b62" @@ -1293,6 +1373,8 @@ 
"github.com/russross/blackfriday", "github.com/satori/go.uuid", "github.com/sergi/go-diff/diffmatchpatch", + "github.com/siddontang/ledisdb/config", + "github.com/siddontang/ledisdb/ledis", "github.com/stretchr/testify/assert", "github.com/tstranex/u2f", "github.com/urfave/cli", diff --git a/vendor/github.com/cupcake/rdb/LICENCE b/vendor/github.com/cupcake/rdb/LICENCE new file mode 100644 index 000000000000..50257901b124 --- /dev/null +++ b/vendor/github.com/cupcake/rdb/LICENCE @@ -0,0 +1,21 @@ +Copyright (c) 2012 Jonathan Rudenberg +Copyright (c) 2012 Sripathi Krishnan + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cupcake/rdb/crc64/crc64.go b/vendor/github.com/cupcake/rdb/crc64/crc64.go new file mode 100644 index 000000000000..54fed9c5a291 --- /dev/null +++ b/vendor/github.com/cupcake/rdb/crc64/crc64.go @@ -0,0 +1,64 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc64 implements the Jones coefficients with an init value of 0. +package crc64 + +import "hash" + +// Redis uses the CRC64 variant with "Jones" coefficients and init value of 0. 
+// +// Specification of this CRC64 variant follows: +// Name: crc-64-jones +// Width: 64 bits +// Poly: 0xad93d23594c935a9 +// Reflected In: True +// Xor_In: 0xffffffffffffffff +// Reflected_Out: True +// Xor_Out: 0x0 + +var table = [256]uint64{0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038, 
0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728} + +func crc64(crc uint64, b []byte) uint64 { + for _, v := range b { + crc = table[byte(crc)^v] ^ (crc >> 8) + } + return crc +} + +func Digest(b []byte) uint64 { + return crc64(0, b) +} + +type digest struct { + crc uint64 +} + +func New() hash.Hash64 { + return &digest{} +} + +func (h *digest) Write(p []byte) (int, error) { + h.crc = crc64(h.crc, p) + return len(p), nil +} + +// Encode in little endian +func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + in = append(in, byte(s)) + in = append(in, byte(s>>8)) + in = append(in, byte(s>>16)) + in = append(in, byte(s>>24)) + in = append(in, byte(s>>32)) + in = append(in, byte(s>>40)) + in = append(in, byte(s>>48)) + in = append(in, byte(s>>56)) + return in +} + +func (d *digest) Sum64() uint64 { return d.crc } +func (d *digest) BlockSize() int { return 1 } +func (d *digest) Size() int { return 8 } +func (d *digest) Reset() { d.crc = 0 } diff --git a/vendor/github.com/cupcake/rdb/decoder.go b/vendor/github.com/cupcake/rdb/decoder.go new file mode 100644 index 000000000000..dd3993b5cebc --- /dev/null +++ b/vendor/github.com/cupcake/rdb/decoder.go @@ -0,0 +1,824 @@ +// Package rdb implements parsing and encoding of the Redis RDB file format. +package rdb + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "strconv" + + "github.com/cupcake/rdb/crc64" +) + +// A Decoder must be implemented to parse a RDB file. +type Decoder interface { + // StartRDB is called when parsing of a valid RDB file starts. + StartRDB() + // StartDatabase is called when database n starts. + // Once a database starts, another database will not start until EndDatabase is called. 
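
Decoder deliberately has one callback per parse event, so a consumer normally embeds the NopDecoder type added later in this patch and overrides only the callbacks it needs. A sketch that counts string keys in a full RDB file (the file name is illustrative, not from the vendored sources):

package main

import (
	"fmt"
	"os"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

// keyCounter overrides Set; every other event falls through to the
// embedded no-op implementation.
type keyCounter struct {
	nopdecoder.NopDecoder
	n int
}

func (c *keyCounter) Set(key, value []byte, expiry int64) { c.n++ }

func main() {
	f, err := os.Open("dump.rdb") // illustrative input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	c := &keyCounter{}
	if err := rdb.Decode(f, c); err != nil {
		panic(err)
	}
	fmt.Println("string keys:", c.n)
}
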
+ StartDatabase(n int) + // AUX field + Aux(key, value []byte) + // ResizeDB hint + ResizeDatabase(dbSize, expiresSize uint32) + // Set is called once for each string key. + Set(key, value []byte, expiry int64) + // StartHash is called at the beginning of a hash. + // Hset will be called exactly length times before EndHash. + StartHash(key []byte, length, expiry int64) + // Hset is called once for each field=value pair in a hash. + Hset(key, field, value []byte) + // EndHash is called when there are no more fields in a hash. + EndHash(key []byte) + // StartSet is called at the beginning of a set. + // Sadd will be called exactly cardinality times before EndSet. + StartSet(key []byte, cardinality, expiry int64) + // Sadd is called once for each member of a set. + Sadd(key, member []byte) + // EndSet is called when there are no more fields in a set. + EndSet(key []byte) + // StartList is called at the beginning of a list. + // Rpush will be called exactly length times before EndList. + // If length of the list is not known, then length is -1 + StartList(key []byte, length, expiry int64) + // Rpush is called once for each value in a list. + Rpush(key, value []byte) + // EndList is called when there are no more values in a list. + EndList(key []byte) + // StartZSet is called at the beginning of a sorted set. + // Zadd will be called exactly cardinality times before EndZSet. + StartZSet(key []byte, cardinality, expiry int64) + // Zadd is called once for each member of a sorted set. + Zadd(key []byte, score float64, member []byte) + // EndZSet is called when there are no more members in a sorted set. + EndZSet(key []byte) + // EndDatabase is called at the end of a database. + EndDatabase(n int) + // EndRDB is called when parsing of the RDB file is complete. + EndRDB() +} + +// Decode parses a RDB file from r and calls the decode hooks on d. +func Decode(r io.Reader, d Decoder) error { + decoder := &decode{d, make([]byte, 8), bufio.NewReader(r)} + return decoder.decode() +} + +// Decode a byte slice from the Redis DUMP command. The dump does not contain the +// database, key or expiry, so they must be included in the function call (but +// can be zero values). 
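
DecodeDump, defined just below, pairs naturally with the Encoder added later in this file set: the encoder can produce exactly the type-byte, encoded-value, version/CRC-footer layout that a Redis DUMP reply uses. A hedged round-trip sketch (the key and value are illustrative; error returns from the encode calls are elided for brevity):

package main

import (
	"bytes"
	"fmt"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

type printer struct{ nopdecoder.NopDecoder }

func (printer) Set(key, value []byte, expiry int64) {
	fmt.Printf("%s = %s (expiry %d)\n", key, value, expiry)
}

func main() {
	// Build a DUMP-style payload: value type, encoded value, dump footer.
	var buf bytes.Buffer
	e := rdb.NewEncoder(&buf)
	e.EncodeType(rdb.TypeString)
	e.EncodeString([]byte("hello"))
	e.EncodeDumpFooter()

	// The dump carries no db, key, or expiry, so the caller supplies them.
	if err := rdb.DecodeDump(buf.Bytes(), 0, []byte("greeting"), 0, printer{}); err != nil {
		panic(err)
	}
	// Output: greeting = hello (expiry 0)
}
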
+func DecodeDump(dump []byte, db int, key []byte, expiry int64, d Decoder) error { + err := verifyDump(dump) + if err != nil { + return err + } + + decoder := &decode{d, make([]byte, 8), bytes.NewReader(dump[1:])} + decoder.event.StartRDB() + decoder.event.StartDatabase(db) + + err = decoder.readObject(key, ValueType(dump[0]), expiry) + + decoder.event.EndDatabase(db) + decoder.event.EndRDB() + return err +} + +type byteReader interface { + io.Reader + io.ByteReader +} + +type decode struct { + event Decoder + intBuf []byte + r byteReader +} + +type ValueType byte + +const ( + TypeString ValueType = 0 + TypeList ValueType = 1 + TypeSet ValueType = 2 + TypeZSet ValueType = 3 + TypeHash ValueType = 4 + + TypeHashZipmap ValueType = 9 + TypeListZiplist ValueType = 10 + TypeSetIntset ValueType = 11 + TypeZSetZiplist ValueType = 12 + TypeHashZiplist ValueType = 13 + TypeListQuicklist ValueType = 14 +) + +const ( + rdb6bitLen = 0 + rdb14bitLen = 1 + rdb32bitLen = 2 + rdbEncVal = 3 + + rdbFlagAux = 0xfa + rdbFlagResizeDB = 0xfb + rdbFlagExpiryMS = 0xfc + rdbFlagExpiry = 0xfd + rdbFlagSelectDB = 0xfe + rdbFlagEOF = 0xff + + rdbEncInt8 = 0 + rdbEncInt16 = 1 + rdbEncInt32 = 2 + rdbEncLZF = 3 + + rdbZiplist6bitlenString = 0 + rdbZiplist14bitlenString = 1 + rdbZiplist32bitlenString = 2 + + rdbZiplistInt16 = 0xc0 + rdbZiplistInt32 = 0xd0 + rdbZiplistInt64 = 0xe0 + rdbZiplistInt24 = 0xf0 + rdbZiplistInt8 = 0xfe + rdbZiplistInt4 = 15 +) + +func (d *decode) decode() error { + err := d.checkHeader() + if err != nil { + return err + } + + d.event.StartRDB() + + var db uint32 + var expiry int64 + firstDB := true + for { + objType, err := d.r.ReadByte() + if err != nil { + return err + } + switch objType { + case rdbFlagAux: + auxKey, err := d.readString() + if err != nil { + return err + } + auxVal, err := d.readString() + if err != nil { + return err + } + d.event.Aux(auxKey, auxVal) + case rdbFlagResizeDB: + dbSize, _, err := d.readLength() + if err != nil { + return err + } + expiresSize, _, err := d.readLength() + if err != nil { + return err + } + d.event.ResizeDatabase(dbSize, expiresSize) + case rdbFlagExpiryMS: + _, err := io.ReadFull(d.r, d.intBuf) + if err != nil { + return err + } + expiry = int64(binary.LittleEndian.Uint64(d.intBuf)) + case rdbFlagExpiry: + _, err := io.ReadFull(d.r, d.intBuf[:4]) + if err != nil { + return err + } + expiry = int64(binary.LittleEndian.Uint32(d.intBuf)) * 1000 + case rdbFlagSelectDB: + if !firstDB { + d.event.EndDatabase(int(db)) + } + db, _, err = d.readLength() + if err != nil { + return err + } + d.event.StartDatabase(int(db)) + case rdbFlagEOF: + d.event.EndDatabase(int(db)) + d.event.EndRDB() + return nil + default: + key, err := d.readString() + if err != nil { + return err + } + err = d.readObject(key, ValueType(objType), expiry) + if err != nil { + return err + } + expiry = 0 + } + } + + panic("not reached") +} + +func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error { + switch typ { + case TypeString: + value, err := d.readString() + if err != nil { + return err + } + d.event.Set(key, value, expiry) + case TypeList: + length, _, err := d.readLength() + if err != nil { + return err + } + d.event.StartList(key, int64(length), expiry) + for i := uint32(0); i < length; i++ { + value, err := d.readString() + if err != nil { + return err + } + d.event.Rpush(key, value) + } + d.event.EndList(key) + case TypeListQuicklist: + length, _, err := d.readLength() + if err != nil { + return err + } + d.event.StartList(key, int64(-1), expiry) + for i 
:= uint32(0); i < length; i++ { + d.readZiplist(key, 0, false) + } + d.event.EndList(key) + case TypeSet: + cardinality, _, err := d.readLength() + if err != nil { + return err + } + d.event.StartSet(key, int64(cardinality), expiry) + for i := uint32(0); i < cardinality; i++ { + member, err := d.readString() + if err != nil { + return err + } + d.event.Sadd(key, member) + } + d.event.EndSet(key) + case TypeZSet: + cardinality, _, err := d.readLength() + if err != nil { + return err + } + d.event.StartZSet(key, int64(cardinality), expiry) + for i := uint32(0); i < cardinality; i++ { + member, err := d.readString() + if err != nil { + return err + } + score, err := d.readFloat64() + if err != nil { + return err + } + d.event.Zadd(key, score, member) + } + d.event.EndZSet(key) + case TypeHash: + length, _, err := d.readLength() + if err != nil { + return err + } + d.event.StartHash(key, int64(length), expiry) + for i := uint32(0); i < length; i++ { + field, err := d.readString() + if err != nil { + return err + } + value, err := d.readString() + if err != nil { + return err + } + d.event.Hset(key, field, value) + } + d.event.EndHash(key) + case TypeHashZipmap: + return d.readZipmap(key, expiry) + case TypeListZiplist: + return d.readZiplist(key, expiry, true) + case TypeSetIntset: + return d.readIntset(key, expiry) + case TypeZSetZiplist: + return d.readZiplistZset(key, expiry) + case TypeHashZiplist: + return d.readZiplistHash(key, expiry) + default: + return fmt.Errorf("rdb: unknown object type %d for key %s", typ, key) + } + return nil +} + +func (d *decode) readZipmap(key []byte, expiry int64) error { + var length int + zipmap, err := d.readString() + if err != nil { + return err + } + buf := newSliceBuffer(zipmap) + lenByte, err := buf.ReadByte() + if err != nil { + return err + } + if lenByte >= 254 { // we need to count the items manually + length, err = countZipmapItems(buf) + length /= 2 + if err != nil { + return err + } + } else { + length = int(lenByte) + } + d.event.StartHash(key, int64(length), expiry) + for i := 0; i < length; i++ { + field, err := readZipmapItem(buf, false) + if err != nil { + return err + } + value, err := readZipmapItem(buf, true) + if err != nil { + return err + } + d.event.Hset(key, field, value) + } + d.event.EndHash(key) + return nil +} + +func readZipmapItem(buf *sliceBuffer, readFree bool) ([]byte, error) { + length, free, err := readZipmapItemLength(buf, readFree) + if err != nil { + return nil, err + } + if length == -1 { + return nil, nil + } + value, err := buf.Slice(length) + if err != nil { + return nil, err + } + _, err = buf.Seek(int64(free), 1) + return value, err +} + +func countZipmapItems(buf *sliceBuffer) (int, error) { + n := 0 + for { + strLen, free, err := readZipmapItemLength(buf, n%2 != 0) + if err != nil { + return 0, err + } + if strLen == -1 { + break + } + _, err = buf.Seek(int64(strLen)+int64(free), 1) + if err != nil { + return 0, err + } + n++ + } + _, err := buf.Seek(0, 0) + return n, err +} + +func readZipmapItemLength(buf *sliceBuffer, readFree bool) (int, int, error) { + b, err := buf.ReadByte() + if err != nil { + return 0, 0, err + } + switch b { + case 253: + s, err := buf.Slice(5) + if err != nil { + return 0, 0, err + } + return int(binary.BigEndian.Uint32(s)), int(s[4]), nil + case 254: + return 0, 0, fmt.Errorf("rdb: invalid zipmap item length") + case 255: + return -1, 0, nil + } + var free byte + if readFree { + free, err = buf.ReadByte() + } + return int(b), int(free), err +} + +func (d *decode) readZiplist(key 
[]byte, expiry int64, addListEvents bool) error { + ziplist, err := d.readString() + if err != nil { + return err + } + buf := newSliceBuffer(ziplist) + length, err := readZiplistLength(buf) + if err != nil { + return err + } + if addListEvents { + d.event.StartList(key, length, expiry) + } + for i := int64(0); i < length; i++ { + entry, err := readZiplistEntry(buf) + if err != nil { + return err + } + d.event.Rpush(key, entry) + } + if addListEvents { + d.event.EndList(key) + } + return nil +} + +func (d *decode) readZiplistZset(key []byte, expiry int64) error { + ziplist, err := d.readString() + if err != nil { + return err + } + buf := newSliceBuffer(ziplist) + cardinality, err := readZiplistLength(buf) + if err != nil { + return err + } + cardinality /= 2 + d.event.StartZSet(key, cardinality, expiry) + for i := int64(0); i < cardinality; i++ { + member, err := readZiplistEntry(buf) + if err != nil { + return err + } + scoreBytes, err := readZiplistEntry(buf) + if err != nil { + return err + } + score, err := strconv.ParseFloat(string(scoreBytes), 64) + if err != nil { + return err + } + d.event.Zadd(key, score, member) + } + d.event.EndZSet(key) + return nil +} + +func (d *decode) readZiplistHash(key []byte, expiry int64) error { + ziplist, err := d.readString() + if err != nil { + return err + } + buf := newSliceBuffer(ziplist) + length, err := readZiplistLength(buf) + if err != nil { + return err + } + length /= 2 + d.event.StartHash(key, length, expiry) + for i := int64(0); i < length; i++ { + field, err := readZiplistEntry(buf) + if err != nil { + return err + } + value, err := readZiplistEntry(buf) + if err != nil { + return err + } + d.event.Hset(key, field, value) + } + d.event.EndHash(key) + return nil +} + +func readZiplistLength(buf *sliceBuffer) (int64, error) { + buf.Seek(8, 0) // skip the zlbytes and zltail + lenBytes, err := buf.Slice(2) + if err != nil { + return 0, err + } + return int64(binary.LittleEndian.Uint16(lenBytes)), nil +} + +func readZiplistEntry(buf *sliceBuffer) ([]byte, error) { + prevLen, err := buf.ReadByte() + if err != nil { + return nil, err + } + if prevLen == 254 { + buf.Seek(4, 1) // skip the 4-byte prevlen + } + + header, err := buf.ReadByte() + if err != nil { + return nil, err + } + switch { + case header>>6 == rdbZiplist6bitlenString: + return buf.Slice(int(header & 0x3f)) + case header>>6 == rdbZiplist14bitlenString: + b, err := buf.ReadByte() + if err != nil { + return nil, err + } + return buf.Slice((int(header&0x3f) << 8) | int(b)) + case header>>6 == rdbZiplist32bitlenString: + lenBytes, err := buf.Slice(4) + if err != nil { + return nil, err + } + return buf.Slice(int(binary.BigEndian.Uint32(lenBytes))) + case header == rdbZiplistInt16: + intBytes, err := buf.Slice(2) + if err != nil { + return nil, err + } + return []byte(strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)), nil + case header == rdbZiplistInt32: + intBytes, err := buf.Slice(4) + if err != nil { + return nil, err + } + return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)), nil + case header == rdbZiplistInt64: + intBytes, err := buf.Slice(8) + if err != nil { + return nil, err + } + return []byte(strconv.FormatInt(int64(binary.LittleEndian.Uint64(intBytes)), 10)), nil + case header == rdbZiplistInt24: + intBytes := make([]byte, 4) + _, err := buf.Read(intBytes[1:]) + if err != nil { + return nil, err + } + return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))>>8), 10)), nil + case header 
== rdbZiplistInt8: + b, err := buf.ReadByte() + return []byte(strconv.FormatInt(int64(int8(b)), 10)), err + case header>>4 == rdbZiplistInt4: + return []byte(strconv.FormatInt(int64(header&0x0f)-1, 10)), nil + } + + return nil, fmt.Errorf("rdb: unknown ziplist header byte: %d", header) +} + +func (d *decode) readIntset(key []byte, expiry int64) error { + intset, err := d.readString() + if err != nil { + return err + } + buf := newSliceBuffer(intset) + intSizeBytes, err := buf.Slice(4) + if err != nil { + return err + } + intSize := binary.LittleEndian.Uint32(intSizeBytes) + + if intSize != 2 && intSize != 4 && intSize != 8 { + return fmt.Errorf("rdb: unknown intset encoding: %d", intSize) + } + + lenBytes, err := buf.Slice(4) + if err != nil { + return err + } + cardinality := binary.LittleEndian.Uint32(lenBytes) + + d.event.StartSet(key, int64(cardinality), expiry) + for i := uint32(0); i < cardinality; i++ { + intBytes, err := buf.Slice(int(intSize)) + if err != nil { + return err + } + var intString string + switch intSize { + case 2: + intString = strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10) + case 4: + intString = strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10) + case 8: + intString = strconv.FormatInt(int64(int64(binary.LittleEndian.Uint64(intBytes))), 10) + } + d.event.Sadd(key, []byte(intString)) + } + d.event.EndSet(key) + return nil +} + +func (d *decode) checkHeader() error { + header := make([]byte, 9) + _, err := io.ReadFull(d.r, header) + if err != nil { + return err + } + + if !bytes.Equal(header[:5], []byte("REDIS")) { + return fmt.Errorf("rdb: invalid file format") + } + + version, _ := strconv.ParseInt(string(header[5:]), 10, 64) + if version < 1 || version > 7 { + return fmt.Errorf("rdb: invalid RDB version number %d", version) + } + + return nil +} + +func (d *decode) readString() ([]byte, error) { + length, encoded, err := d.readLength() + if err != nil { + return nil, err + } + if encoded { + switch length { + case rdbEncInt8: + i, err := d.readUint8() + return []byte(strconv.FormatInt(int64(int8(i)), 10)), err + case rdbEncInt16: + i, err := d.readUint16() + return []byte(strconv.FormatInt(int64(int16(i)), 10)), err + case rdbEncInt32: + i, err := d.readUint32() + return []byte(strconv.FormatInt(int64(int32(i)), 10)), err + case rdbEncLZF: + clen, _, err := d.readLength() + if err != nil { + return nil, err + } + ulen, _, err := d.readLength() + if err != nil { + return nil, err + } + compressed := make([]byte, clen) + _, err = io.ReadFull(d.r, compressed) + if err != nil { + return nil, err + } + decompressed := lzfDecompress(compressed, int(ulen)) + if len(decompressed) != int(ulen) { + return nil, fmt.Errorf("decompressed string length %d didn't match expected length %d", len(decompressed), ulen) + } + return decompressed, nil + } + } + + str := make([]byte, length) + _, err = io.ReadFull(d.r, str) + return str, err +} + +func (d *decode) readUint8() (uint8, error) { + b, err := d.r.ReadByte() + return uint8(b), err +} + +func (d *decode) readUint16() (uint16, error) { + _, err := io.ReadFull(d.r, d.intBuf[:2]) + if err != nil { + return 0, err + } + return binary.LittleEndian.Uint16(d.intBuf), nil +} + +func (d *decode) readUint32() (uint32, error) { + _, err := io.ReadFull(d.r, d.intBuf[:4]) + if err != nil { + return 0, err + } + return binary.LittleEndian.Uint32(d.intBuf), nil +} + +func (d *decode) readUint64() (uint64, error) { + _, err := io.ReadFull(d.r, d.intBuf) + if err != nil { + return 0, err + } 
+ return binary.LittleEndian.Uint64(d.intBuf), nil +} + +func (d *decode) readUint32Big() (uint32, error) { + _, err := io.ReadFull(d.r, d.intBuf[:4]) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(d.intBuf), nil +} + +// Doubles are saved as strings prefixed by an unsigned +// 8 bit integer specifying the length of the representation. +// This 8 bit integer has special values in order to specify the following +// conditions: +// 253: not a number +// 254: + inf +// 255: - inf +func (d *decode) readFloat64() (float64, error) { + length, err := d.readUint8() + if err != nil { + return 0, err + } + switch length { + case 253: + return math.NaN(), nil + case 254: + return math.Inf(0), nil + case 255: + return math.Inf(-1), nil + default: + floatBytes := make([]byte, length) + _, err := io.ReadFull(d.r, floatBytes) + if err != nil { + return 0, err + } + f, err := strconv.ParseFloat(string(floatBytes), 64) + return f, err + } + + panic("not reached") +} + +func (d *decode) readLength() (uint32, bool, error) { + b, err := d.r.ReadByte() + if err != nil { + return 0, false, err + } + // The first two bits of the first byte are used to indicate the length encoding type + switch (b & 0xc0) >> 6 { + case rdb6bitLen: + // When the first two bits are 00, the next 6 bits are the length. + return uint32(b & 0x3f), false, nil + case rdb14bitLen: + // When the first two bits are 01, the next 14 bits are the length. + bb, err := d.r.ReadByte() + if err != nil { + return 0, false, err + } + return (uint32(b&0x3f) << 8) | uint32(bb), false, nil + case rdbEncVal: + // When the first two bits are 11, the next object is encoded. + // The next 6 bits indicate the encoding type. + return uint32(b & 0x3f), true, nil + default: + // When the first two bits are 10, the next 6 bits are discarded. + // The next 4 bytes are the length. 
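
To make the two-bit tag scheme handled by readLength concrete, a few worked decodings under the rules spelled out in the comments above (the byte values are illustrative):

// [0x2A]                      tag 00 -> length = 0x2A                 = 42
// [0x41 0x02]                 tag 01 -> length = (0x01 << 8) | 0x02   = 258
// [0x80 0x00 0x01 0x00 0x00]  tag 10 -> length = 4 bytes big-endian   = 65536
// [0xC0]                      tag 11 -> not a length: encoding type rdbEncInt8
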
+ length, err := d.readUint32Big() + return length, false, err + } + + panic("not reached") +} + +func verifyDump(d []byte) error { + if len(d) < 10 { + return fmt.Errorf("rdb: invalid dump length") + } + version := binary.LittleEndian.Uint16(d[len(d)-10:]) + if version != uint16(Version) { + return fmt.Errorf("rdb: invalid version %d, expecting %d", version, Version) + } + + if binary.LittleEndian.Uint64(d[len(d)-8:]) != crc64.Digest(d[:len(d)-8]) { + return fmt.Errorf("rdb: invalid CRC checksum") + } + + return nil +} + +func lzfDecompress(in []byte, outlen int) []byte { + out := make([]byte, outlen) + for i, o := 0, 0; i < len(in); { + ctrl := int(in[i]) + i++ + if ctrl < 32 { + for x := 0; x <= ctrl; x++ { + out[o] = in[i] + i++ + o++ + } + } else { + length := ctrl >> 5 + if length == 7 { + length = length + int(in[i]) + i++ + } + ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1 + i++ + for x := 0; x <= length+1; x++ { + out[o] = out[ref] + ref++ + o++ + } + } + } + return out +} diff --git a/vendor/github.com/cupcake/rdb/encoder.go b/vendor/github.com/cupcake/rdb/encoder.go new file mode 100644 index 000000000000..7902a7d314d7 --- /dev/null +++ b/vendor/github.com/cupcake/rdb/encoder.go @@ -0,0 +1,130 @@ +package rdb + +import ( + "encoding/binary" + "fmt" + "hash" + "io" + "math" + "strconv" + + "github.com/cupcake/rdb/crc64" +) + +const Version = 6 + +type Encoder struct { + w io.Writer + crc hash.Hash +} + +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{crc: crc64.New()} + e.w = io.MultiWriter(w, e.crc) + return e +} + +func (e *Encoder) EncodeHeader() error { + _, err := fmt.Fprintf(e.w, "REDIS%04d", Version) + return err +} + +func (e *Encoder) EncodeFooter() error { + e.w.Write([]byte{rdbFlagEOF}) + _, err := e.w.Write(e.crc.Sum(nil)) + return err +} + +func (e *Encoder) EncodeDumpFooter() error { + binary.Write(e.w, binary.LittleEndian, uint16(Version)) + _, err := e.w.Write(e.crc.Sum(nil)) + return err +} + +func (e *Encoder) EncodeDatabase(n int) error { + e.w.Write([]byte{rdbFlagSelectDB}) + return e.EncodeLength(uint32(n)) +} + +func (e *Encoder) EncodeExpiry(expiry uint64) error { + b := make([]byte, 9) + b[0] = rdbFlagExpiryMS + binary.LittleEndian.PutUint64(b[1:], expiry) + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) EncodeType(v ValueType) error { + _, err := e.w.Write([]byte{byte(v)}) + return err +} + +func (e *Encoder) EncodeString(s []byte) error { + written, err := e.encodeIntString(s) + if written { + return err + } + e.EncodeLength(uint32(len(s))) + _, err = e.w.Write(s) + return err +} + +func (e *Encoder) EncodeLength(l uint32) (err error) { + switch { + case l < 1<<6: + _, err = e.w.Write([]byte{byte(l)}) + case l < 1<<14: + _, err = e.w.Write([]byte{byte(l>>8) | rdb14bitLen<<6, byte(l)}) + default: + b := make([]byte, 5) + b[0] = rdb32bitLen << 6 + binary.BigEndian.PutUint32(b[1:], l) + _, err = e.w.Write(b) + } + return +} + +func (e *Encoder) EncodeFloat(f float64) (err error) { + switch { + case math.IsNaN(f): + _, err = e.w.Write([]byte{253}) + case math.IsInf(f, 1): + _, err = e.w.Write([]byte{254}) + case math.IsInf(f, -1): + _, err = e.w.Write([]byte{255}) + default: + b := []byte(strconv.FormatFloat(f, 'g', 17, 64)) + e.w.Write([]byte{byte(len(b))}) + _, err = e.w.Write(b) + } + return +} + +func (e *Encoder) encodeIntString(b []byte) (written bool, err error) { + s := string(b) + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return + } + // if the stringified parsed int isn't exactly the same, we can't encode it 
as an int + if s != strconv.FormatInt(i, 10) { + return + } + switch { + case i >= math.MinInt8 && i <= math.MaxInt8: + _, err = e.w.Write([]byte{rdbEncVal << 6, byte(int8(i))}) + case i >= math.MinInt16 && i <= math.MaxInt16: + b := make([]byte, 3) + b[0] = rdbEncVal<<6 | rdbEncInt16 + binary.LittleEndian.PutUint16(b[1:], uint16(int16(i))) + _, err = e.w.Write(b) + case i >= math.MinInt32 && i <= math.MaxInt32: + b := make([]byte, 5) + b[0] = rdbEncVal<<6 | rdbEncInt32 + binary.LittleEndian.PutUint32(b[1:], uint32(int32(i))) + _, err = e.w.Write(b) + default: + return + } + return true, err +} diff --git a/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go b/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go new file mode 100644 index 000000000000..de93a6973fed --- /dev/null +++ b/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go @@ -0,0 +1,24 @@ +package nopdecoder + +// NopDecoder may be embedded in a real Decoder to avoid implementing methods. +type NopDecoder struct{} + +func (d NopDecoder) StartRDB() {} +func (d NopDecoder) StartDatabase(n int) {} +func (d NopDecoder) Aux(key, value []byte) {} +func (d NopDecoder) ResizeDatabase(dbSize, expiresSize uint32) {} +func (d NopDecoder) EndDatabase(n int) {} +func (d NopDecoder) EndRDB() {} +func (d NopDecoder) Set(key, value []byte, expiry int64) {} +func (d NopDecoder) StartHash(key []byte, length, expiry int64) {} +func (d NopDecoder) Hset(key, field, value []byte) {} +func (d NopDecoder) EndHash(key []byte) {} +func (d NopDecoder) StartSet(key []byte, cardinality, expiry int64) {} +func (d NopDecoder) Sadd(key, member []byte) {} +func (d NopDecoder) EndSet(key []byte) {} +func (d NopDecoder) StartList(key []byte, length, expiry int64) {} +func (d NopDecoder) Rpush(key, value []byte) {} +func (d NopDecoder) EndList(key []byte) {} +func (d NopDecoder) StartZSet(key []byte, cardinality, expiry int64) {} +func (d NopDecoder) Zadd(key []byte, score float64, member []byte) {} +func (d NopDecoder) EndZSet(key []byte) {} diff --git a/vendor/github.com/cupcake/rdb/slice_buffer.go b/vendor/github.com/cupcake/rdb/slice_buffer.go new file mode 100644 index 000000000000..b3e12a02c6c0 --- /dev/null +++ b/vendor/github.com/cupcake/rdb/slice_buffer.go @@ -0,0 +1,67 @@ +package rdb + +import ( + "errors" + "io" +) + +type sliceBuffer struct { + s []byte + i int +} + +func newSliceBuffer(s []byte) *sliceBuffer { + return &sliceBuffer{s, 0} +} + +func (s *sliceBuffer) Slice(n int) ([]byte, error) { + if s.i+n > len(s.s) { + return nil, io.EOF + } + b := s.s[s.i : s.i+n] + s.i += n + return b, nil +} + +func (s *sliceBuffer) ReadByte() (byte, error) { + if s.i >= len(s.s) { + return 0, io.EOF + } + b := s.s[s.i] + s.i++ + return b, nil +} + +func (s *sliceBuffer) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + if s.i >= len(s.s) { + return 0, io.EOF + } + n := copy(b, s.s[s.i:]) + s.i += n + return n, nil +} + +func (s *sliceBuffer) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case 0: + abs = offset + case 1: + abs = int64(s.i) + offset + case 2: + abs = int64(len(s.s)) + offset + default: + return 0, errors.New("invalid whence") + } + if abs < 0 { + return 0, errors.New("negative position") + } + if abs >= 1<<31 { + return 0, errors.New("position out of range") + } + s.i = int(abs) + return abs, nil +} diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 000000000000..583bdae62823 --- /dev/null +++ 
b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 000000000000..d5fd98c0211a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,23 @@ +// Package toml is a TOML parser and manipulation library. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// +// Marshaling +// +// Go-toml can marshal and unmarshal TOML documents from and to data +// structures. +// +// TOML document as a tree +// +// Go-toml can operate on a TOML document as a tree. Use one of the Load* +// functions to parse TOML data and obtain a Tree instance, then one of its +// methods to manipulate the tree. +// +// JSONPath-like queries +// +// The package github.com/pelletier/go-toml/query implements a system +// similar to JSONPath to quickly retrieve elements of a TOML document using a +// single expression. See the package documentation for more information. +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go new file mode 100644 index 000000000000..14570c8d3577 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package toml + +func Fuzz(data []byte) int { + tree, err := LoadBytes(data) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + str, err := tree.ToTomlString() + if err != nil { + if str != "" { + panic(`str must be "" if there is an error`) + } + panic(err) + } + + tree, err = Load(str) + if err != nil { + if tree != nil { + panic("tree must be nil if there is an error") + } + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 000000000000..284db64678b3 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,85 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "bytes" + "errors" + "fmt" + "unicode" +) + +// Convert the bare key group string to an array. +// The input supports double quotation to allow "." inside the key name, +// but escape sequences are not supported. Lexers must unescape them beforehand. 
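
parseKey is unexported, so the contract described above is easiest to pin down as a hypothetical same-package test (a sketch, not part of the vendored sources):

package toml

import (
	"reflect"
	"testing"
)

func TestParseKeySketch(t *testing.T) {
	// Quoted segments keep their dots; bare segments split on them.
	got, err := parseKey(`a."b.c".d`)
	if err != nil {
		t.Fatal(err)
	}
	if want := []string{"a", "b.c", "d"}; !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
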
+func parseKey(key string) ([]string, error) { + groups := []string{} + var buffer bytes.Buffer + inQuotes := false + wasInQuotes := false + ignoreSpace := true + expectDot := false + + for _, char := range key { + if ignoreSpace { + if char == ' ' { + continue + } + ignoreSpace = false + } + switch char { + case '"': + if inQuotes { + groups = append(groups, buffer.String()) + buffer.Reset() + wasInQuotes = true + } + inQuotes = !inQuotes + expectDot = false + case '.': + if inQuotes { + buffer.WriteRune(char) + } else { + if !wasInQuotes { + if buffer.Len() == 0 { + return nil, errors.New("empty table key") + } + groups = append(groups, buffer.String()) + buffer.Reset() + } + ignoreSpace = true + expectDot = false + wasInQuotes = false + } + case ' ': + if inQuotes { + buffer.WriteRune(char) + } else { + expectDot = true + } + default: + if !inQuotes && !isValidBareChar(char) { + return nil, fmt.Errorf("invalid bare character: %c", char) + } + if !inQuotes && expectDot { + return nil, errors.New("what?") + } + buffer.WriteRune(char) + expectDot = false + } + } + if inQuotes { + return nil, errors.New("mismatched quotes") + } + if buffer.Len() > 0 { + groups = append(groups, buffer.String()) + } + if len(groups) == 0 { + return nil, errors.New("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 000000000000..d11de428594c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,750 @@ +// TOML lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + inputIdx int + input []rune // Textual source + currentTokenStart int + currentTokenStop int + tokens []token + depth int + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r := l.peek() + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + l.inputIdx++ + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.currentTokenStop++ + } + return r +} + +func (l *tomlLexer) ignore() { + l.currentTokenStart = l.currentTokenStop + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + }) + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) +} + +func (l *tomlLexer) peek() rune { + if l.inputIdx >= len(l.input) { + return eof + } + return l.input[l.inputIdx] +} + +func (l *tomlLexer) peekString(size int) string { + maxIdx := len(l.input) + upperIdx := l.inputIdx + size // FIXME: potential overflow + if upperIdx > maxIdx { + upperIdx = maxIdx + } + return string(l.input[l.inputIdx:upperIdx]) +} + +func (l *tomlLexer) follow(next string) bool { + return next == 
l.peekString(len(next)) +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens = append(l.tokens, token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + }) + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '[': + return l.lexTableKey + case '#': + return l.lexComment(l.lexVoid) + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if l.depth > 0 { + return l.lexRvalue + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + l.depth++ + return l.lexLeftBracket + case ']': + l.depth-- + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment(l.lexRvalue) + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if l.depth == 0 { + return l.lexVoid + } + return l.lexRvalue + case '_': + return l.errorf("cannot start number with underscore") + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if l.follow("inf") { + return l.lexInf + } + + if l.follow("nan") { + return l.lexNan + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := l.peekString(35) + dateMatch := dateRegexp.FindString(possibleDate) + if dateMatch != "" { + l.fastForward(len(dateMatch)) + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if isAlphanumeric(next) { + return l.lexKey + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexInf() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenInf) + return l.lexRvalue +} + +func (l *tomlLexer) lexNan() tomlLexStateFn { + l.fastForward(3) + l.emit(tokenNan) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + return l.lexRvalue +} + +// Parse the key and emits its value without escape sequences. +// bare keys, basic string keys and literal string keys are supported. 
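
Each lex* method in this file is a tomlLexStateFn that returns the next state, following the Rob Pike lexer pattern cited at the top of the file. The driver for such a machine is a short loop; a sketch of the pattern (the vendored file's own run function lies past this excerpt and may differ in detail):

func (l *tomlLexer) run() {
	// Start in the void state and keep following returned states
	// until a state function returns nil (EOF or error token emitted).
	for state := l.lexVoid; state != nil; {
		state = state()
	}
}
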
+func (l *tomlLexer) lexKey() tomlLexStateFn { + growingString := "" + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\'' { + l.next() + str, err := l.lexLiteralStringAsString(`'`, false) + if err != nil { + return l.errorf(err.Error()) + } + growingString += str + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + break + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + growingString += string(r) + l.next() + } + l.emitWithValue(tokenKey, growingString) + return l.lexVoid +} + +func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { + return func() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return previousState + } +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return growingString, nil + } + + next := l.peek() + if next == eof { + break + } + growingString += string(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
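
The escape handling implemented in the function below is easiest to observe through the package's public API; a small sketch assuming go-toml's Load and Tree.Get, both part of this vendored version:

package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	// The lexer decodes the \u00E9 escape while scanning the basic string.
	tree, err := toml.Load(`s = "caf\u00E9"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Get("s")) // café
}
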
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + for { + if l.follow(terminator) { + return growingString, nil + } + + if l.follow("\\") { + l.next() + switch l.peek() { + case '\r': + fallthrough + case '\n': + fallthrough + case '\t': + fallthrough + case ' ': + // skip all whitespace chars following backslash + for strings.ContainsRune("\r\n\t ", l.peek()) { + l.next() + } + case '"': + growingString += "\"" + l.next() + case 'n': + growingString += "\n" + l.next() + case 'b': + growingString += "\b" + l.next() + case 'f': + growingString += "\f" + l.next() + case '/': + growingString += "/" + l.next() + case 't': + growingString += "\t" + l.next() + case 'r': + growingString += "\r" + l.next() + case '\\': + growingString += "\\" + l.next() + case 'u': + l.next() + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return "", errors.New("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + case 'U': + l.next() + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + if !isHexDigit(c) { + return "", errors.New("unfinished unicode escape") + } + l.next() + code = code + string(c) + } + intcode, err := strconv.ParseInt(code, 16, 64) + if err != nil { + return "", errors.New("invalid unicode escape: \\U" + code) + } + growingString += string(rune(intcode)) + default: + return "", errors.New("invalid escape sequence: \\" + string(l.peek())) + } + } else { + r := l.peek() + + if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { + return "", fmt.Errorf("unescaped control character %U", r) + } + l.next() + growingString += string(r) + } + + if l.peek() == eof { + break + } + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := `"` + discardLeadingNewLine := false + acceptNewLines := false + if l.follow(`""`) { + l.skip() + l.skip() + terminator = `"""` + discardLeadingNewLine = true + acceptNewLines = true + } + + str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) + + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +func (l *tomlLexer) lexTableKey() tomlLexStateFn { + l.next() + + if l.peek() == '[' { + // token '[[' signifies an array of tables + l.next() + l.emit(tokenDoubleLeftBracket) + return l.lexInsideTableArrayKey + } + // vanilla table key + l.emit(tokenLeftBracket) + return l.lexInsideTableKey +} + +// Parse the key till "]]", but only bare keys are supported +func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroupArray) + } + l.next() + if l.peek() != ']' { + break + } + l.next() + l.emit(tokenDoubleRightBracket) + return l.lexVoid + case '[': + return l.errorf("table array key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table array key") +} + +// Parse the key till "]" but only 
bare keys are supported +func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { + for r := l.peek(); r != eof; r = l.peek() { + switch r { + case ']': + if l.currentTokenStop > l.currentTokenStart { + l.emit(tokenKeyGroup) + } + l.next() + l.emit(tokenRightBracket) + return l.lexVoid + case '[': + return l.errorf("table key cannot contain ']'") + default: + l.next() + } + } + return l.errorf("unclosed table key") +} + +func (l *tomlLexer) lexRightBracket() tomlLexStateFn { + l.next() + l.emit(tokenRightBracket) + return l.lexRvalue +} + +type validRuneFn func(r rune) bool + +func isValidHexRune(r rune) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r rune) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r rune) bool { + return r == '0' || r == '1' || r == '_' +} + +func (l *tomlLexer) lexNumber() tomlLexStateFn { + r := l.peek() + + if r == '0' { + follow := l.peekString(2) + if len(follow) == 2 { + var isValidRune validRuneFn + switch follow[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { + return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) + } + } + + if isValidRune != nil { + l.next() + l.next() + digitSeen := false + for { + next := l.peek() + if !isValidRune(next) { + break + } + digitSeen = true + l.next() + } + + if !digitSeen { + return l.errorf("number needs at least one digit") + } + + l.emit(tokenInteger) + + return l.lexRvalue + } + } + } + + if r == '+' || r == '-' { + l.next() + if l.follow("inf") { + return l.lexInf + } + if l.follow("nan") { + return l.lexNan + } + } + + pointSeen := false + expSeen := false + digitSeen := false + for { + next := l.peek() + if next == '.' 
{ + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + l.next() + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if next == 'e' || next == 'E' { + expSeen = true + l.next() + r := l.peek() + if r == '+' || r == '-' { + l.next() + } + } else if isDigit(next) { + digitSeen = true + l.next() + } else if next == '_' { + l.next() + } else { + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen || expSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexRvalue +} + +func (l *tomlLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } +} + +func init() { + dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) +} + +// Entry point +func lexToml(inputBytes []byte) []token { + runes := bytes.Runes(inputBytes) + l := &tomlLexer{ + input: runes, + tokens: make([]token, 0, 256), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go new file mode 100644 index 000000000000..671da5564c30 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -0,0 +1,609 @@ +package toml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +const tagKeyMultiline = "multiline" + +type tomlOpts struct { + name string + comment string + commented bool + multiline bool + include bool + omitempty bool +} + +type encOpts struct { + quoteMapKeys bool + arraysOneElementPerLine bool +} + +var encOptsDefaults = encOpts{ + quoteMapKeys: false, +} + +var timeType = reflect.TypeOf(time.Time{}) +var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + +// Check if the given marshall type maps to a Tree primitive +func isPrimitive(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isPrimitive(mtype.Elem()) + case reflect.Bool: + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Struct: + return mtype == timeType || isCustomMarshaler(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree slice +func isTreeSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Slice: + return !isOtherSlice(mtype) + default: + return false + } +} + +// Check if the given marshall type maps to a non-Tree slice +func isOtherSlice(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Ptr: + return isOtherSlice(mtype.Elem()) + case reflect.Slice: + return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) + default: + return false + } +} + +// Check if the given marshall type maps to a Tree +func isTree(mtype reflect.Type) bool { + switch mtype.Kind() { + case reflect.Map: + return true + case reflect.Struct: + return !isPrimitive(mtype) + default: + return false + } +} + +func isCustomMarshaler(mtype reflect.Type) bool { + return mtype.Implements(marshalerType) +} + +func callCustomMarshaler(mval reflect.Value) ([]byte, error) { + return mval.Interface().(Marshaler).MarshalTOML() +} + 
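+// As an illustrative sketch (the Version type is hypothetical, not part of
+// this file), a value can take over its own encoding by implementing the
+// Marshaler interface defined below:
+//
+//	type Version struct{ Major, Minor int }
+//
+//	func (v Version) MarshalTOML() ([]byte, error) {
+//		return []byte(fmt.Sprintf("%d.%d", v.Major, v.Minor)), nil
+//	}
+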
+// Marshaler is the interface implemented by types that +// can marshal themselves into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +/* +Marshal returns the TOML encoding of v. Behavior is similar to the Go json +encoder, except that there is no concept of a Marshaler interface or MarshalTOML +function for sub-structs, and currently only definite types can be marshaled +(i.e. no `interface{}`). + +The following struct annotations are supported: + + toml:"Field" Overrides the field's name to output. + omitempty When set, empty values and groups are not emitted. + comment:"comment" Emits a # comment on the same line. This supports new lines. + commented:"true" Emits the value as commented. + +Note that pointers are automatically assigned the "omitempty" option, as TOML +explicitly does not handle null values (saying instead the label should be +dropped). + +Tree structural types and corresponding marshal types: + + *Tree (*)struct, (*)map[string]interface{} + []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} + []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) + interface{} (*)primitive + +Tree primitive types and corresponding marshal types: + + uint64 uint, uint8-uint64, pointers to same + int64 int, int8-uint64, pointers to same + float64 float32, float64, pointers to same + string string, pointers to same + bool bool, pointers to same + time.Time time.Time{}, pointers to same +*/ +func Marshal(v interface{}) ([]byte, error) { + return NewEncoder(nil).marshal(v) +} + +// Encoder writes TOML values to an output stream. +type Encoder struct { + w io.Writer + encOpts +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + encOpts: encOptsDefaults, + } +} + +// Encode writes the TOML encoding of v to the stream. +// +// See the documentation for Marshal for details. +func (e *Encoder) Encode(v interface{}) error { + b, err := e.marshal(v) + if err != nil { + return err + } + if _, err := e.w.Write(b); err != nil { + return err + } + return nil +} + +// QuoteMapKeys sets up the encoder to encode +// maps with string type keys with quoted TOML keys. +// +// This relieves the character limitations on map keys. +func (e *Encoder) QuoteMapKeys(v bool) *Encoder { + e.quoteMapKeys = v + return e +} + +// ArraysWithOneElementPerLine sets up the encoder to encode arrays +// with more than one element on multiple lines instead of one. 
+// +// For example: +// +// A = [1,2,3] +// +// Becomes +// +// A = [ +// 1, +// 2, +// 3, +// ] +func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { + e.arraysOneElementPerLine = v + return e +} + +func (e *Encoder) marshal(v interface{}) ([]byte, error) { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Struct { + return []byte{}, errors.New("Only a struct can be marshaled to TOML") + } + sval := reflect.ValueOf(v) + if isCustomMarshaler(mtype) { + return callCustomMarshaler(sval) + } + t, err := e.valueToTree(mtype, sval) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) + + return buf.Bytes(), err +} + +// Convert given marshal struct or map value to toml tree +func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToTree(mtype.Elem(), mval.Elem()) + } + tval := newTree() + switch mtype.Kind() { + case reflect.Struct: + for i := 0; i < mtype.NumField(); i++ { + mtypef, mvalf := mtype.Field(i), mval.Field(i) + opts := tomlOptions(mtypef) + if opts.include && (!opts.omitempty || !isZero(mvalf)) { + val, err := e.valueToToml(mtypef.Type, mvalf) + if err != nil { + return nil, err + } + + tval.SetWithOptions(opts.name, SetOptions{ + Comment: opts.comment, + Commented: opts.commented, + Multiline: opts.multiline, + }, val) + } + } + case reflect.Map: + for _, key := range mval.MapKeys() { + mvalf := mval.MapIndex(key) + val, err := e.valueToToml(mtype.Elem(), mvalf) + if err != nil { + return nil, err + } + if e.quoteMapKeys { + keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) + if err != nil { + return nil, err + } + tval.SetPath([]string{keyStr}, val) + } else { + tval.Set(key.String(), val) + } + } + } + return tval, nil +} + +// Convert given marshal slice to slice of Toml trees +func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { + tval := make([]*Tree, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal slice to slice of toml values +func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + tval := make([]interface{}, mval.Len(), mval.Len()) + for i := 0; i < mval.Len(); i++ { + val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + if err != nil { + return nil, err + } + tval[i] = val + } + return tval, nil +} + +// Convert given marshal value to toml value +func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { + if mtype.Kind() == reflect.Ptr { + return e.valueToToml(mtype.Elem(), mval.Elem()) + } + switch { + case isCustomMarshaler(mtype): + return callCustomMarshaler(mval) + case isTree(mtype): + return e.valueToTree(mtype, mval) + case isTreeSlice(mtype): + return e.valueToTreeSlice(mtype, mval) + case isOtherSlice(mtype): + return e.valueToOtherSlice(mtype, mval) + default: + switch mtype.Kind() { + case reflect.Bool: + return mval.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return mval.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return mval.Uint(), nil + case reflect.Float32, reflect.Float64: + return mval.Float(), nil + case reflect.String: + return 
mval.String(), nil + case reflect.Struct: + return mval.Interface().(time.Time), nil + default: + return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) + } + } +} + +// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. +// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for +// sub-structs, and only definite types can be unmarshaled. +func (t *Tree) Unmarshal(v interface{}) error { + d := Decoder{tval: t} + return d.unmarshal(v) +} + +// Marshal returns the TOML encoding of Tree. +// See Marshal() documentation for types mapping table. +func (t *Tree) Marshal() ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(t) + return buf.Bytes(), err +} + +// Unmarshal parses the TOML-encoded data and stores the result in the value +// pointed to by v. Behavior is similar to the Go json encoder, except that there +// is no concept of an Unmarshaler interface or UnmarshalTOML function for +// sub-structs, and currently only definite types can be unmarshaled to (i.e. no +// `interface{}`). +// +// The following struct annotations are supported: +// +// toml:"Field" Overrides the field's name to map to. +// +// See Marshal() documentation for types mapping table. +func Unmarshal(data []byte, v interface{}) error { + t, err := LoadReader(bytes.NewReader(data)) + if err != nil { + return err + } + return t.Unmarshal(v) +} + +// Decoder reads and decodes TOML values from an input stream. +type Decoder struct { + r io.Reader + tval *Tree + encOpts +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + encOpts: encOptsDefaults, + } +} + +// Decode reads a TOML-encoded value from it's input +// and unmarshals it in the value pointed at by v. +// +// See the documentation for Marshal for details. 
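+//
+// A minimal usage sketch (r is any io.Reader; Config is a hypothetical
+// caller-defined struct):
+//
+//	var conf Config
+//	if err := toml.NewDecoder(r).Decode(&conf); err != nil {
+//		// handle the error
+//	}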
+func (d *Decoder) Decode(v interface{}) error { + var err error + d.tval, err = LoadReader(d.r) + if err != nil { + return err + } + return d.unmarshal(v) +} + +func (d *Decoder) unmarshal(v interface{}) error { + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") + } + + sval, err := d.valueFromTree(mtype.Elem(), d.tval) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil +} + +// Convert toml tree to marshal struct or map, using marshal type +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + var mval reflect.Value + switch mtype.Kind() { + case reflect.Struct: + mval = reflect.New(mtype).Elem() + for i := 0; i < mtype.NumField(); i++ { + mtypef := mtype.Field(i) + opts := tomlOptions(mtypef) + if opts.include { + baseKey := opts.name + keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} + for _, key := range keysToTry { + exists := tval.Has(key) + if !exists { + continue + } + val := tval.Get(key) + mvalf, err := d.valueFromToml(mtypef.Type, val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.Field(i).Set(mvalf) + break + } + } + } + case reflect.Map: + mval = reflect.MakeMap(mtype) + for _, key := range tval.Keys() { + // TODO: path splits key + val := tval.GetPath([]string{key}) + mvalf, err := d.valueFromToml(mtype.Elem(), val) + if err != nil { + return mval, formatError(err, tval.GetPosition(key)) + } + mval.SetMapIndex(reflect.ValueOf(key), mvalf) + } + } + return mval, nil +} + +// Convert toml value to marshal struct/map slice, using marshal type +func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromTree(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal primitive slice, using marshal type +func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { + mval := reflect.MakeSlice(mtype, len(tval), len(tval)) + for i := 0; i < len(tval); i++ { + val, err := d.valueFromToml(mtype.Elem(), tval[i]) + if err != nil { + return mval, err + } + mval.Index(i).Set(val) + } + return mval, nil +} + +// Convert toml value to marshal value, using marshal type +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + if mtype.Kind() == reflect.Ptr { + return d.unwrapPointer(mtype, tval) + } + + switch tval.(type) { + case *Tree: + if isTree(mtype) { + return d.valueFromTree(mtype, tval.(*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) + case []*Tree: + if isTreeSlice(mtype) { + return d.valueFromTreeSlice(mtype, tval.([]*Tree)) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) + case []interface{}: + if isOtherSlice(mtype) { + return d.valueFromOtherSlice(mtype, tval.([]interface{})) + } + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + default: + switch mtype.Kind() { + case reflect.Bool, reflect.Struct: + val := reflect.ValueOf(tval) + // if this passes for when mtype is 
reflect.Struct, tval is a time.Time + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.String: + val := reflect.ValueOf(tval) + // stupidly, int64 is convertible to string. So special case this. + if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if val.Int() < 0 { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + case reflect.Float32, reflect.Float64: + val := reflect.ValueOf(tval) + if !val.Type().ConvertibleTo(mtype) { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + } + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { + return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + } + + return val.Convert(mtype), nil + default: + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + } + } +} + +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := d.valueFromToml(mtype.Elem(), tval) + if err != nil { + return reflect.ValueOf(nil), err + } + mval := reflect.New(mtype.Elem()) + mval.Elem().Set(val) + return mval, nil +} + +func tomlOptions(vf reflect.StructField) tomlOpts { + tag := vf.Tag.Get("toml") + parse := strings.Split(tag, ",") + var comment string + if c := vf.Tag.Get("comment"); c != "" { + comment = c + } + commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) + multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) + result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} + if parse[0] != "" { + if parse[0] == "-" && len(parse) == 1 { + result.include = false + } else { + result.name = strings.Trim(parse[0], " ") + } + } + if vf.PkgPath != "" { + result.include = false + } + if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { + result.omitempty = true + } + if vf.Type.Kind() == reflect.Ptr { + result.omitempty = true + } + return result +} + +func isZero(val reflect.Value) bool { + switch val.Type().Kind() { + case reflect.Map: + fallthrough + case reflect.Array: + fallthrough + case reflect.Slice: + return val.Len() == 0 + default: + return 
reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) + } +} + +func formatError(err error, pos Position) error { + if err.Error()[0] == '(' { // Error already contains position information + return err + } + return fmt.Errorf("%s: %s", pos, err) +} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new file mode 100644 index 000000000000..2d27599a9993 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,430 @@ +// TOML Parser. + +package toml + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flowIdx int + flow []token + tree *Tree + currentTable []string + seenTableKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if p.flowIdx >= len(p.flow) { + return nil + } + return &p.flow[p.flowIdx] +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + tok := p.peek() + if tok == nil { + return nil + } + p.flowIdx++ + return tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + default: + p.raiseError(tok, "unexpected token") + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a table array key", key) + } + + // get or create table array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*Tree + if destTree == nil { + array = make([]*Tree, 0) + } else if target, ok := destTree.([]*Tree); ok && target != nil { + array = destTree.([]*Tree) + } else { + p.raiseError(key, "key %s is already assigned and not of type table array", key) + } + p.currentTable = keys + + // add a new tree to the end of the table array + newTree := newTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentTable, array) + + // remove all keys that were children of this table array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenTableKeys); { + tableKey := p.seenTableKeys[ii] + if strings.HasPrefix(tableKey, prefix) { + p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) 
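+			// e.g. each new [[fruit]] element forgets previously seen
+			// child tables such as "fruit.variety", so the next element
+			// is free to declare them again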
+ } else { + found = (tableKey == key.val) + ii++ + } + } + + // keep this key name from use by other kinds of assignments + if !found { + p.seenTableKeys = append(p.seenTableKeys, key.val) + } + + // move to next parser state + p.assume(tokenDoubleRightBracket) + return p.parseStart +} + +func (p *tomlParser) parseGroup() tomlParserStateFn { + startToken := p.getToken() // discard the [ + key := p.getToken() + if key.typ != tokenKeyGroup { + p.raiseError(key, "unexpected token %s, was expecting a table key", key) + } + for _, item := range p.seenTableKeys { + if item == key.val { + p.raiseError(key, "duplicated tables") + } + } + + p.seenTableKeys = append(p.seenTableKeys, key.val) + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid table array key: %s", err) + } + if err := p.tree.createSubTree(keys, startToken.Position); err != nil { + p.raiseError(key, "%s", err) + } + p.assume(tokenRightBracket) + p.currentTable = keys + return p.parseStart +} + +func (p *tomlParser) parseAssign() tomlParserStateFn { + key := p.getToken() + p.assume(tokenEqual) + + value := p.parseRvalue() + var tableKey []string + if len(p.currentTable) > 0 { + tableKey = p.currentTable + } else { + tableKey = []string{} + } + + // find the table to assign, looking out for arrays of tables + var targetNode *Tree + switch node := p.tree.GetPath(tableKey).(type) { + case []*Tree: + targetNode = node[len(node)-1] + case *Tree: + targetNode = node + default: + p.raiseError(key, "Unknown table type for path: %s", + strings.Join(tableKey, ".")) + } + + // assign value to the found table + keyVals := []string{key.val} + if len(keyVals) != 1 { + p.raiseError(key, "Invalid key") + } + keyVal := keyVals[0] + localKey := []string{keyVal} + finalKey := append(tableKey, keyVal) + if targetNode.GetPath(localKey) != nil { + p.raiseError(key, "The following key was defined twice: %s", + strings.Join(finalKey, ".")) + } + var toInsert interface{} + + switch value.(type) { + case *Tree, []*Tree: + toInsert = value + default: + toInsert = &tomlValue{value: value, position: key.Position} + } + targetNode.values[keyVal] = toInsert + return p.parseStart +} + +var numberUnderscoreInvalidRegexp *regexp.Regexp +var hexNumberUnderscoreInvalidRegexp *regexp.Regexp + +func numberContainsInvalidUnderscore(value string) error { + if numberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in number") + } + return nil +} + +func hexNumberContainsInvalidUnderscore(value string) error { + if hexNumberUnderscoreInvalidRegexp.MatchString(value) { + return errors.New("invalid use of _ in hex number") + } + return nil +} + +func cleanupNumberToken(value string) string { + cleanedVal := strings.Replace(value, "_", "", -1) + return cleanedVal +} + +func (p *tomlParser) parseRvalue() interface{} { + tok := p.getToken() + if tok == nil || tok.typ == tokenEOF { + p.raiseError(tok, "expecting a value") + } + + switch tok.typ { + case tokenString: + return tok.val + case tokenTrue: + return true + case tokenFalse: + return false + case tokenInf: + if tok.val[0] == '-' { + return math.Inf(-1) + } + return math.Inf(1) + case tokenNan: + return math.NaN() + case tokenInteger: + cleanedVal := cleanupNumberToken(tok.val) + var err error + var val int64 + if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { + switch cleanedVal[1] { + case 'x': + err = hexNumberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + case 
'o': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + case 'b': + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + default: + panic("invalid base") // the lexer should catch this first + } + } else { + err = numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + val, err = strconv.ParseInt(cleanedVal, 10, 64) + } + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenFloat: + err := numberContainsInvalidUnderscore(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) + } + cleanedVal := cleanupNumberToken(tok.val) + val, err := strconv.ParseFloat(cleanedVal, 64) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenDate: + val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) + if err != nil { + p.raiseError(tok, "%s", err) + } + return val + case tokenLeftBracket: + return p.parseArray() + case tokenLeftCurlyBrace: + return p.parseInlineTable() + case tokenEqual: + p.raiseError(tok, "cannot have multiple equals for the same key") + case tokenError: + p.raiseError(tok, "%s", tok) + } + + p.raiseError(tok, "never reached") + + return nil +} + +func tokenIsComma(t *token) bool { + return t != nil && t.typ == tokenComma +} + +func (p *tomlParser) parseInlineTable() *Tree { + tree := newTree() + var previous *token +Loop: + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated inline table") + } + switch follow.typ { + case tokenRightCurlyBrace: + p.getToken() + break Loop + case tokenKey: + if !tokenIsComma(previous) && previous != nil { + p.raiseError(follow, "comma expected between fields in inline table") + } + key := p.getToken() + p.assume(tokenEqual) + value := p.parseRvalue() + tree.Set(key.val, value) + case tokenComma: + if previous == nil { + p.raiseError(follow, "inline table cannot start with a comma") + } + if tokenIsComma(previous) { + p.raiseError(follow, "need field between two commas in inline table") + } + p.getToken() + default: + p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + } + previous = follow + } + if tokenIsComma(previous) { + p.raiseError(previous, "trailing comma at the end of inline table") + } + return tree +} + +func (p *tomlParser) parseArray() interface{} { + var array []interface{} + arrayType := reflect.TypeOf(nil) + for { + follow := p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ == tokenRightBracket { + p.getToken() + break + } + val := p.parseRvalue() + if arrayType == nil { + arrayType = reflect.TypeOf(val) + } + if reflect.TypeOf(val) != arrayType { + p.raiseError(follow, "mixed types in array") + } + array = append(array, val) + follow = p.peek() + if follow == nil || follow.typ == tokenEOF { + p.raiseError(follow, "unterminated array") + } + if follow.typ != tokenRightBracket && follow.typ != tokenComma { + p.raiseError(follow, "missing comma") + } + if follow.typ == tokenComma { + p.getToken() + } + } + // An array of Trees is actually an array of inline + // tables, which is a shorthand for a table array. If the + // array was not converted from []interface{} to []*Tree, + // the two notations would not be equivalent. 
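+	// e.g. these two notations must parse to the same tree:
+	//
+	//	points = [ { x = 1 }, { x = 2 } ]
+	//
+	//	[[points]]
+	//	x = 1
+	//
+	//	[[points]]
+	//	x = 2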
+ if arrayType == reflect.TypeOf(newTree()) { + tomlArray := make([]*Tree, len(array)) + for i, v := range array { + tomlArray[i] = v.(*Tree) + } + return tomlArray + } + return array +} + +func parseToml(flow []token) *Tree { + result := newTree() + result.position = Position{1, 1} + parser := &tomlParser{ + flowIdx: 0, + flow: flow, + tree: result, + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + } + parser.run() + return result +} + +func init() { + numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) + hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) +} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go new file mode 100644 index 000000000000..c17bff87baaa --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/position.go @@ -0,0 +1,29 @@ +// Position support for go-toml + +package toml + +import ( + "fmt" +) + +// Position of a document element within a TOML document. +// +// Line and Col are both 1-indexed positions for the element's line number and +// column number, respectively. Values of zero or less will cause Invalid(), +// to return true. +type Position struct { + Line int // line within the document + Col int // column within the line +} + +// String representation of the position. +// Displays 1-indexed line and column numbers. +func (p Position) String() string { + return fmt.Sprintf("(%d, %d)", p.Line, p.Col) +} + +// Invalid returns whether or not the position is valid (i.e. with negative or +// null values) +func (p Position) Invalid() bool { + return p.Line <= 0 || p.Col <= 0 +} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 000000000000..1a9081346679 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,144 @@ +package toml + +import ( + "fmt" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenInf + tokenNan + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenDate + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + "Float", + "Inf", + "NaN", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "Date", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isKeyChar(r 
rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return unicode.IsNumber(r) +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 000000000000..98c185ad0b8e --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,367 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + comment string + commented bool + multiline bool + position Position +} + +// Tree is the result of the parsing of a TOML file. +type Tree struct { + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + comment string + commented bool + position Position +} + +func newTree() *Tree { + return &Tree{ + values: make(map[string]interface{}), + position: Position{}, + } +} + +// TreeFromMap initializes a new Tree object using the given map. +func TreeFromMap(m map[string]interface{}) (*Tree, error) { + result, err := toTree(m) + if err != nil { + return nil, err + } + return result.(*Tree), nil +} + +// Position returns the position of the tree. +func (t *Tree) Position() Position { + return t.position +} + +// Has returns a boolean indicating if the given key exists. +func (t *Tree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *Tree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree (does not recurse). +func (t *Tree) Keys() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + return keys +} + +// Get the value at key in the Tree. +// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. +// If you need to retrieve non-bare keys, use GetPath. +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *Tree) Get(key string) interface{} { + if key == "" { + return t + } + return t.GetPath(strings.Split(key, ".")) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
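+// For example, t.GetPath([]string{"server", "port"}) returns the value stored
+// under key "port" of table [server], or nil if any step of the path is
+// missing.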
+func (t *Tree) GetPath(keys []string) interface{} { + if len(keys) == 0 { + return t + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return nil + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return nil + } + subtree = node[len(node)-1] + default: + return nil // cannot navigate through other node types + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.value + default: + return node + } +} + +// GetPosition returns the position of the given key. +func (t *Tree) GetPosition(key string) Position { + if key == "" { + return t.position + } + return t.GetPositionPath(strings.Split(key, ".")) +} + +// GetPositionPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. +func (t *Tree) GetPositionPath(keys []string) Position { + if len(keys) == 0 { + return t.position + } + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + value, exists := subtree.values[intermediateKey] + if !exists { + return Position{0, 0} + } + switch node := value.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + subtree = node[len(node)-1] + default: + return Position{0, 0} + } + } + // branch based on final node type + switch node := subtree.values[keys[len(keys)-1]].(type) { + case *tomlValue: + return node.position + case *Tree: + return node.position + case []*Tree: + // go to most recent element + if len(node) == 0 { + return Position{0, 0} + } + return node[len(node)-1].position + default: + return Position{0, 0} + } +} + +// GetDefault works like Get but with a default value +func (t *Tree) GetDefault(key string, def interface{}) interface{} { + val := t.Get(key) + if val == nil { + return def + } + return val +} + +// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. +// The default values within the struct are valid default options. +type SetOptions struct { + Comment string + Commented bool + Multiline bool +} + +// SetWithOptions is the same as Set, but allows you to provide formatting +// instructions to the key, that will be used by Marshal(). +func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { + t.SetPathWithOptions(strings.Split(key, "."), opts, value) +} + +// SetPathWithOptions is the same as SetPath, but allows you to provide +// formatting instructions to the key, that will be reused by Marshal(). 
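+// For example (a sketch):
+//
+//	t.SetPathWithOptions([]string{"server", "port"},
+//		SetOptions{Comment: "listening port"}, int64(8080))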
+func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTree() + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + subtree.values[intermediateKey] = append(node, newTree()) + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch value.(type) { + case *Tree: + tt := value.(*Tree) + tt.comment = opts.Comment + toInsert = value + case []*Tree: + toInsert = value + case *tomlValue: + tt := value.(*tomlValue) + tt.comment = opts.Comment + toInsert = tt + default: + toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// Set an element in the tree. +// Key is a dot-separated path (e.g. a.b.c). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) Set(key string, value interface{}) { + t.SetWithComment(key, "", false, value) +} + +// SetWithComment is the same as Set, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { + t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) +} + +// SetPath sets an element in the tree. +// Keys is an array of path elements (e.g. {"a","b","c"}). +// Creates all necessary intermediate trees, if needed. +func (t *Tree) SetPath(keys []string, value interface{}) { + t.SetPathWithComment(keys, "", false, value) +} + +// SetPathWithComment is the same as SetPath, but allows you to provide comment +// information to the key, that will be reused by Marshal(). +func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTree() + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + subtree.values[intermediateKey] = append(node, newTree()) + } + subtree = node[len(node)-1] + } + } + + var toInsert interface{} + + switch value.(type) { + case *Tree: + tt := value.(*Tree) + tt.comment = comment + toInsert = value + case []*Tree: + toInsert = value + case *tomlValue: + tt := value.(*tomlValue) + tt.comment = comment + toInsert = tt + default: + toInsert = &tomlValue{value: value, comment: comment, commented: commented} + } + + subtree.values[keys[len(keys)-1]] = toInsert +} + +// createSubTree takes a tree and a key and create the necessary intermediate +// subtrees to create a subtree at that point. In-place. +// +// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *Tree) createSubTree(keys []string, pos Position) error { + subtree := t + for _, intermediateKey := range keys { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTree() + tree.position = pos + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*Tree: + subtree = node[len(node)-1] + case *Tree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// LoadBytes creates a Tree from a []byte. +func LoadBytes(b []byte) (tree *Tree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + tree = parseToml(lexToml(b)) + return +} + +// LoadReader creates a Tree from any io.Reader. +func LoadReader(reader io.Reader) (tree *Tree, err error) { + inputBytes, err := ioutil.ReadAll(reader) + if err != nil { + return + } + tree, err = LoadBytes(inputBytes) + return +} + +// Load creates a Tree from a string. +func Load(content string) (tree *Tree, err error) { + return LoadBytes([]byte(content)) +} + +// LoadFile creates a Tree from a file. +func LoadFile(path string) (tree *Tree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go new file mode 100644 index 000000000000..79610e9b340c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -0,0 +1,142 @@ +package toml + +import ( + "fmt" + "reflect" + "time" +) + +var kindToType = [reflect.String + 1]reflect.Type{ + reflect.Bool: reflect.TypeOf(true), + reflect.String: reflect.TypeOf(""), + reflect.Float32: reflect.TypeOf(float64(1)), + reflect.Float64: reflect.TypeOf(float64(1)), + reflect.Int: reflect.TypeOf(int64(1)), + reflect.Int8: reflect.TypeOf(int64(1)), + reflect.Int16: reflect.TypeOf(int64(1)), + reflect.Int32: reflect.TypeOf(int64(1)), + reflect.Int64: reflect.TypeOf(int64(1)), + reflect.Uint: reflect.TypeOf(uint64(1)), + reflect.Uint8: reflect.TypeOf(uint64(1)), + reflect.Uint16: reflect.TypeOf(uint64(1)), + reflect.Uint32: reflect.TypeOf(uint64(1)), + reflect.Uint64: reflect.TypeOf(uint64(1)), +} + +// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
+// supported values: +// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 +func typeFor(k reflect.Kind) reflect.Type { + if k > 0 && int(k) < len(kindToType) { + return kindToType[k] + } + return nil +} + +func simpleValueCoercion(object interface{}) (interface{}, error) { + switch original := object.(type) { + case string, bool, int64, uint64, float64, time.Time: + return original, nil + case int: + return int64(original), nil + case int8: + return int64(original), nil + case int16: + return int64(original), nil + case int32: + return int64(original), nil + case uint: + return uint64(original), nil + case uint8: + return uint64(original), nil + case uint16: + return uint64(original), nil + case uint32: + return uint64(original), nil + case float32: + return float64(original), nil + case fmt.Stringer: + return original.String(), nil + default: + return nil, fmt.Errorf("cannot convert type %T to Tree", object) + } +} + +func sliceToTree(object interface{}) (interface{}, error) { + // arrays are a bit tricky, since they can represent either a + // collection of simple values, which is represented by one + // *tomlValue, or an array of tables, which is represented by an + // array of *Tree. + + // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice + value := reflect.ValueOf(object) + insideType := value.Type().Elem() + length := value.Len() + if length > 0 { + insideType = reflect.ValueOf(value.Index(0).Interface()).Type() + } + if insideType.Kind() == reflect.Map { + // this is considered as an array of tables + tablesArray := make([]*Tree, 0, length) + for i := 0; i < length; i++ { + table := value.Index(i) + tree, err := toTree(table.Interface()) + if err != nil { + return nil, err + } + tablesArray = append(tablesArray, tree.(*Tree)) + } + return tablesArray, nil + } + + sliceType := typeFor(insideType.Kind()) + if sliceType == nil { + sliceType = insideType + } + + arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) + + for i := 0; i < length; i++ { + val := value.Index(i).Interface() + simpleValue, err := simpleValueCoercion(val) + if err != nil { + return nil, err + } + arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) + } + return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil +} + +func toTree(object interface{}) (interface{}, error) { + value := reflect.ValueOf(object) + + if value.Kind() == reflect.Map { + values := map[string]interface{}{} + keys := value.MapKeys() + for _, key := range keys { + if key.Kind() != reflect.String { + if _, ok := key.Interface().(string); !ok { + return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) + } + } + + v := value.MapIndex(key) + newValue, err := toTree(v.Interface()) + if err != nil { + return nil, err + } + values[key.String()] = newValue + } + return &Tree{values: values, position: Position{}}, nil + } + + if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { + return sliceToTree(object) + } + + simpleValue, err := simpleValueCoercion(object) + if err != nil { + return nil, err + } + return &tomlValue{value: simpleValue, position: Position{}}, nil +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go new file mode 100644 index 000000000000..e4049e29f2a1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -0,0 
+1,333 @@ +package toml + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// Encodes a string to a TOML-compliant multi-line string value +// This function is a clone of the existing encodeTomlString function, except that whitespace characters +// are preserved. Quotation marks and backslashes are also not escaped. +func encodeMultilineTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString("\t") + case '\n': + b.WriteString("\n") + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString("\r") + case '"': + b.WriteString(`"`) + case '\\': + b.WriteString(`\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +// Encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + var b bytes.Buffer + + for _, rr := range value { + switch rr { + case '\b': + b.WriteString(`\b`) + case '\t': + b.WriteString(`\t`) + case '\n': + b.WriteString(`\n`) + case '\f': + b.WriteString(`\f`) + case '\r': + b.WriteString(`\r`) + case '"': + b.WriteString(`\"`) + case '\\': + b.WriteString(`\\`) + default: + intRr := uint16(rr) + if intRr < 0x001F { + b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) + } else { + b.WriteRune(rr) + } + } + } + return b.String() +} + +func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { + // this interface check is added to dereference the change made in the writeTo function. + // That change was made to allow this function to see formatting options. + tv, ok := v.(*tomlValue) + if ok { + v = tv.value + } else { + tv = &tomlValue{} + } + + switch value := v.(type) { + case uint64: + return strconv.FormatUint(value, 10), nil + case int64: + return strconv.FormatInt(value, 10), nil + case float64: + // Ensure a round float does contain a decimal point. Otherwise feeding + // the output back to the parser would convert to an integer. 
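+		// e.g. 2.0 is written back as "2.0" rather than "2".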
+ if math.Trunc(value) == value { + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + } + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + case string: + if tv.multiline { + return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil + } + return "\"" + encodeTomlString(value) + "\"", nil + case []byte: + b, _ := v.([]byte) + return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + case bool: + if value { + return "true", nil + } + return "false", nil + case time.Time: + return value.Format(time.RFC3339), nil + case nil: + return "", nil + } + + rv := reflect.ValueOf(v) + + if rv.Kind() == reflect.Slice { + var values []string + for i := 0; i < rv.Len(); i++ { + item := rv.Index(i).Interface() + itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + if err != nil { + return "", err + } + values = append(values, itemRepr) + } + if arraysOneElementPerLine && len(values) > 1 { + stringBuffer := bytes.Buffer{} + valueIndent := indent + ` ` // TODO: move that to a shared encoder state + + stringBuffer.WriteString("[\n") + + for _, value := range values { + stringBuffer.WriteString(valueIndent) + stringBuffer.WriteString(value) + stringBuffer.WriteString(`,`) + stringBuffer.WriteString("\n") + } + + stringBuffer.WriteString(indent + "]") + + return stringBuffer.String(), nil + } + return "[" + strings.Join(values, ",") + "]", nil + } + return "", fmt.Errorf("unsupported value type %T: %v", v, v) +} + +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + simpleValuesKeys := make([]string, 0) + complexValuesKeys := make([]string, 0) + + for k := range t.values { + v := t.values[k] + switch v.(type) { + case *Tree, []*Tree: + complexValuesKeys = append(complexValuesKeys, k) + default: + simpleValuesKeys = append(simpleValuesKeys, k) + } + } + + sort.Strings(simpleValuesKeys) + sort.Strings(complexValuesKeys) + + for _, k := range simpleValuesKeys { + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + + repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + + var commented string + if v.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } + + for _, k := range complexValuesKeys { + v := t.values[k] + + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + var commented string + if t.commented { + commented = "# " + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" + } + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc + } + } + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + } + } + } + + return bytesCount, nil +} + +func writeStrings(w io.Writer, s ...string) (int, error) { + var n int + for i := range s { + b, err := io.WriteString(w, s[i]) + n += b + if err != nil { + return n, err + } + } + return n, nil +} + +// WriteTo encode the Tree as Toml and writes it to the writer w. +// Returns the number of bytes written in case of success, or an error if anything happened. +func (t *Tree) WriteTo(w io.Writer) (int64, error) { + return t.writeTo(w, "", "", 0, false) +} + +// ToTomlString generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser. +// If the conversion cannot be performed, ToString returns a non-nil error. +func (t *Tree) ToTomlString() (string, error) { + var buf bytes.Buffer + _, err := t.WriteTo(&buf) + if err != nil { + return "", err + } + return buf.String(), nil +} + +// String generates a human-readable representation of the current tree. +// Alias of ToString. Present to implement the fmt.Stringer interface. +func (t *Tree) String() string { + result, _ := t.ToTomlString() + return result +} + +// ToMap recursively generates a representation of the tree using Go built-in structures. 
+// The following types are used: +// +// * bool +// * float64 +// * int64 +// * string +// * uint64 +// * time.Time +// * map[string]interface{} (where interface{} is any of this list) +// * []interface{} (where interface{} is any of this list) +func (t *Tree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*Tree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *Tree: + result[k] = node.ToMap() + case *tomlValue: + result[k] = node.value + } + } + return result +} diff --git a/vendor/github.com/siddontang/go/LICENSE b/vendor/github.com/siddontang/go/LICENSE new file mode 100644 index 000000000000..80511a0a784d --- /dev/null +++ b/vendor/github.com/siddontang/go/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 siddontang + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/siddontang/go/bson/LICENSE b/vendor/github.com/siddontang/go/bson/LICENSE new file mode 100644 index 000000000000..890326017b85 --- /dev/null +++ b/vendor/github.com/siddontang/go/bson/LICENSE @@ -0,0 +1,25 @@ +BSON library for Go + +Copyright (c) 2010-2012 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
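Not part of the patch, but as a quick illustration of the go-toml Tree writer vendored above: a minimal sketch, assuming the go-toml v1 API as vendored here, that round-trips a document through Tree.ToTomlString and Tree.ToMap.

	package main

	import (
		"fmt"

		"github.com/pelletier/go-toml"
	)

	func main() {
		tree, err := toml.Load(`[server]
	addr = "127.0.0.1:6380"
	ratio = 1.0
	`)
		if err != nil {
			panic(err)
		}
		// ToTomlString exercises writeTo/tomlValueStringRepresentation; note
		// the round float keeps its decimal point so it re-parses as a float.
		out, err := tree.ToTomlString()
		if err != nil {
			panic(err)
		}
		fmt.Print(out)
		fmt.Println(tree.ToMap())
	}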
diff --git a/vendor/github.com/siddontang/go/filelock/LICENSE b/vendor/github.com/siddontang/go/filelock/LICENSE new file mode 100644 index 000000000000..fec05ce12959 --- /dev/null +++ b/vendor/github.com/siddontang/go/filelock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_generic.go b/vendor/github.com/siddontang/go/filelock/file_lock_generic.go new file mode 100644 index 000000000000..53c292acbdff --- /dev/null +++ b/vendor/github.com/siddontang/go/filelock/file_lock_generic.go @@ -0,0 +1,17 @@ +// Copyright 2012 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package filelock + +import ( + "fmt" + "io" + "runtime" +) + +func Lock(name string) (io.Closer, error) { + return nil, fmt.Errorf("leveldb/db: file locking is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go b/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go new file mode 100644 index 000000000000..56ff3e2ceef2 --- /dev/null +++ b/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go @@ -0,0 +1,43 @@ +// Copyright 2014 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package filelock + +import ( + "io" + "os" + "syscall" +) + +// lockCloser hides all of an os.File's methods, except for Close. +type lockCloser struct { + f *os.File +} + +func (l lockCloser) Close() error { + return l.f.Close() +} + +func Lock(name string) (io.Closer, error) { + f, err := os.Create(name) + if err != nil { + return nil, err + } + + spec := syscall.Flock_t{ + Type: syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, // 0 means to lock the entire file. 
+ Pid: int32(os.Getpid()), + } + if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil { + f.Close() + return nil, err + } + + return lockCloser{f}, nil +} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_unix.go b/vendor/github.com/siddontang/go/filelock/file_lock_unix.go new file mode 100644 index 000000000000..f70ae6192c59 --- /dev/null +++ b/vendor/github.com/siddontang/go/filelock/file_lock_unix.go @@ -0,0 +1,51 @@ +// Copyright 2014 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package filelock + +import ( + "io" + "os" + "syscall" +) + +// lockCloser hides all of an os.File's methods, except for Close. +type lockCloser struct { + f *os.File +} + +func (l lockCloser) Close() error { + return l.f.Close() +} + +func Lock(name string) (io.Closer, error) { + f, err := os.Create(name) + if err != nil { + return nil, err + } + + /* + Some people tell me FcntlFlock does not exist, so use flock here + */ + if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { + f.Close() + return nil, err + } + + // spec := syscall.Flock_t{ + // Type: syscall.F_WRLCK, + // Whence: int16(os.SEEK_SET), + // Start: 0, + // Len: 0, // 0 means to lock the entire file. + // Pid: int32(os.Getpid()), + // } + // if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil { + // f.Close() + // return nil, err + // } + + return lockCloser{f}, nil +} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_windows.go b/vendor/github.com/siddontang/go/filelock/file_lock_windows.go new file mode 100644 index 000000000000..5d3e4ba2029a --- /dev/null +++ b/vendor/github.com/siddontang/go/filelock/file_lock_windows.go @@ -0,0 +1,36 @@ +// Copyright 2013 The LevelDB-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filelock + +import ( + "io" + "syscall" +) + +// lockCloser hides all of an syscall.Handle's methods, except for Close. 
+type lockCloser struct {
+	fd syscall.Handle
+}
+
+func (l lockCloser) Close() error {
+	return syscall.Close(l.fd)
+}
+
+func Lock(name string) (io.Closer, error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	fd, err := syscall.CreateFile(p,
+		syscall.GENERIC_READ|syscall.GENERIC_WRITE,
+		0, nil, syscall.CREATE_ALWAYS,
+		syscall.FILE_ATTRIBUTE_NORMAL,
+		0,
+	)
+	if err != nil {
+		return nil, err
+	}
+	return lockCloser{fd: fd}, nil
+}
diff --git a/vendor/github.com/siddontang/go/hack/hack.go b/vendor/github.com/siddontang/go/hack/hack.go
new file mode 100644
index 000000000000..74ee83cbf5d5
--- /dev/null
+++ b/vendor/github.com/siddontang/go/hack/hack.go
@@ -0,0 +1,27 @@
+package hack
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// String converts a byte slice to a string without copying.
+// Unsafe: the caller must not modify b afterwards; use at your own risk.
+func String(b []byte) (s string) {
+	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	pstring.Data = pbytes.Data
+	pstring.Len = pbytes.Len
+	return
+}
+
+// Slice converts a string to a byte slice without copying.
+// Unsafe: the returned slice must not be modified; use at your own risk.
+func Slice(s string) (b []byte) {
+	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	pbytes.Data = pstring.Data
+	pbytes.Len = pstring.Len
+	pbytes.Cap = pstring.Len
+	return
+}
diff --git a/vendor/github.com/siddontang/go/ioutil2/ioutil.go b/vendor/github.com/siddontang/go/ioutil2/ioutil.go
new file mode 100644
index 000000000000..35c0ad3cad51
--- /dev/null
+++ b/vendor/github.com/siddontang/go/ioutil2/ioutil.go
@@ -0,0 +1,39 @@
+// Copyright 2012, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ioutil2
+
+// WriteFileAtomic writes data to a temp file and atomically moves it into place when everything else succeeds.
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+)
+
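+// Illustrative call (hypothetical file name; ledisdb uses this same helper
+// for config rewrites later in this patch):
+//
+//	err := ioutil2.WriteFileAtomic("ledis.conf", buf.Bytes(), 0644)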
+func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
+	dir, name := path.Dir(filename), path.Base(filename)
+	f, err := ioutil.TempFile(dir, name)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	f.Close()
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err == nil {
+		// only chmod on success; do not mask an earlier write error
+		err = os.Chmod(f.Name(), perm)
+	}
+	if err != nil {
+		os.Remove(f.Name())
+		return err
+	}
+	return os.Rename(f.Name(), filename)
+}
+
+// FileExists reports whether the named file exists.
+func FileExists(name string) bool {
+	_, err := os.Stat(name)
+	return !os.IsNotExist(err)
+}
diff --git a/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go b/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go
new file mode 100644
index 000000000000..c02ab0d5fd1f
--- /dev/null
+++ b/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go
@@ -0,0 +1,69 @@
+package ioutil2
+
+import (
+	"errors"
+	"io"
+)
+
+// ErrExceedLimit is returned when a write would pass the section's limit.
+var ErrExceedLimit = errors.New("write exceed limit")
+
+// NewSectionWriter returns a SectionWriter that writes to w starting at
+// offset off and refuses writes past off+n.
+func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
+	return &SectionWriter{w, off, off, off + n}
+}
+
+type SectionWriter struct {
+	w     io.WriterAt
+	base  int64
+	off   int64
+	limit int64
+}
+
+func (s *SectionWriter) Write(p []byte) (n int, err error) {
+	if s.off >= s.limit {
+		return 0, ErrExceedLimit
+	}
+
+	if max := s.limit - s.off; int64(len(p)) > max {
+		return 0, ErrExceedLimit
+	}
+
+	n, err = s.w.WriteAt(p, s.off)
+	s.off += int64(n)
+	return
+}
+
+var errWhence = errors.New("Seek: invalid whence")
+var errOffset = errors.New("Seek: invalid offset")
+
+func (s *SectionWriter) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	default:
+		return 0, errWhence
+	case 0:
+		offset += s.base
+	case 1:
+		offset += s.off
+	case 2:
+		offset += s.limit
+	}
+	if offset < s.base {
+		return 0, errOffset
+	}
+	s.off = offset
+	return offset - s.base, nil
+}
+
+func (s *SectionWriter) WriteAt(p []byte, off int64) (n int, err error) {
+	if off < 0 || off >= s.limit-s.base {
+		return 0, errOffset
+	}
+	off += s.base
+	if max := s.limit - off; int64(len(p)) > max {
+		return 0, ErrExceedLimit
+	}
+
+	return s.w.WriteAt(p, off)
+}
+
+// Size returns the size of the section in bytes.
+func (s *SectionWriter) Size() int64 { return s.limit - s.base }
diff --git a/vendor/github.com/siddontang/go/log/doc.go b/vendor/github.com/siddontang/go/log/doc.go
new file mode 100644
index 000000000000..81a60ee853bc
--- /dev/null
+++ b/vendor/github.com/siddontang/go/log/doc.go
@@ -0,0 +1,21 @@
+// Package log supplies more advanced features than Go's standard log package.
+//
+// It supports different log levels: trace, debug, info, warn, error, fatal.
+//
+// It also supports different log handlers, so you can log to stdout, a file, a socket, etc.
+//
+// Use
+//
+//	import "github.com/siddontang/go/log"
+//
+//	// log with different levels
+//	log.Info("hello world")
+//	log.Error("hello world")
+//
+//	// create a logger with a specific handler
+//	h, _ := NewStreamHandler(os.Stdout)
+//	l := log.NewDefault(h)
+//	l.Info("hello world")
+//	l.Infof("%s %d", "hello", 123)
+//
+package log
diff --git a/vendor/github.com/siddontang/go/log/filehandler.go b/vendor/github.com/siddontang/go/log/filehandler.go
new file mode 100644
index 000000000000..2c158e2cf569
--- /dev/null
+++ b/vendor/github.com/siddontang/go/log/filehandler.go
@@ -0,0 +1,221 @@
+package log
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"time"
+)
+
+// FileHandler writes logs to a file.
+type FileHandler struct { + fd *os.File +} + +func NewFileHandler(fileName string, flag int) (*FileHandler, error) { + dir := path.Dir(fileName) + os.Mkdir(dir, 0777) + + f, err := os.OpenFile(fileName, flag, 0) + if err != nil { + return nil, err + } + + h := new(FileHandler) + + h.fd = f + + return h, nil +} + +func (h *FileHandler) Write(b []byte) (n int, err error) { + return h.fd.Write(b) +} + +func (h *FileHandler) Close() error { + return h.fd.Close() +} + +//RotatingFileHandler writes log a file, if file size exceeds maxBytes, +//it will backup current file and open a new one. +// +//max backup file number is set by backupCount, it will delete oldest if backups too many. +type RotatingFileHandler struct { + fd *os.File + + fileName string + maxBytes int + curBytes int + backupCount int +} + +func NewRotatingFileHandler(fileName string, maxBytes int, backupCount int) (*RotatingFileHandler, error) { + dir := path.Dir(fileName) + os.MkdirAll(dir, 0777) + + h := new(RotatingFileHandler) + + if maxBytes <= 0 { + return nil, fmt.Errorf("invalid max bytes") + } + + h.fileName = fileName + h.maxBytes = maxBytes + h.backupCount = backupCount + + var err error + h.fd, err = os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + return nil, err + } + + f, err := h.fd.Stat() + if err != nil { + return nil, err + } + h.curBytes = int(f.Size()) + + return h, nil +} + +func (h *RotatingFileHandler) Write(p []byte) (n int, err error) { + h.doRollover() + n, err = h.fd.Write(p) + h.curBytes += n + return +} + +func (h *RotatingFileHandler) Close() error { + if h.fd != nil { + return h.fd.Close() + } + return nil +} + +func (h *RotatingFileHandler) doRollover() { + + if h.curBytes < h.maxBytes { + return + } + + f, err := h.fd.Stat() + if err != nil { + return + } + + if h.maxBytes <= 0 { + return + } else if f.Size() < int64(h.maxBytes) { + h.curBytes = int(f.Size()) + return + } + + if h.backupCount > 0 { + h.fd.Close() + + for i := h.backupCount - 1; i > 0; i-- { + sfn := fmt.Sprintf("%s.%d", h.fileName, i) + dfn := fmt.Sprintf("%s.%d", h.fileName, i+1) + + os.Rename(sfn, dfn) + } + + dfn := fmt.Sprintf("%s.1", h.fileName) + os.Rename(h.fileName, dfn) + + h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + h.curBytes = 0 + f, err := h.fd.Stat() + if err != nil { + return + } + h.curBytes = int(f.Size()) + } +} + +//TimeRotatingFileHandler writes log to a file, +//it will backup current and open a new one, with a period time you sepecified. +// +//refer: http://docs.python.org/2/library/logging.handlers.html. +//same like python TimedRotatingFileHandler. 
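+//
+// Example (illustrative): NewTimeRotatingFileHandler("app.log", WhenDay, 1)
+// rotates once per day, renaming the current file to "app.log" plus a
+// "2006-01-02" timestamp suffix.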
+type TimeRotatingFileHandler struct { + fd *os.File + + baseName string + interval int64 + suffix string + rolloverAt int64 +} + +const ( + WhenSecond = iota + WhenMinute + WhenHour + WhenDay +) + +func NewTimeRotatingFileHandler(baseName string, when int8, interval int) (*TimeRotatingFileHandler, error) { + dir := path.Dir(baseName) + os.Mkdir(dir, 0777) + + h := new(TimeRotatingFileHandler) + + h.baseName = baseName + + switch when { + case WhenSecond: + h.interval = 1 + h.suffix = "2006-01-02_15-04-05" + case WhenMinute: + h.interval = 60 + h.suffix = "2006-01-02_15-04" + case WhenHour: + h.interval = 3600 + h.suffix = "2006-01-02_15" + case WhenDay: + h.interval = 3600 * 24 + h.suffix = "2006-01-02" + default: + return nil, fmt.Errorf("invalid when_rotate: %d", when) + } + + h.interval = h.interval * int64(interval) + + var err error + h.fd, err = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + return nil, err + } + + fInfo, _ := h.fd.Stat() + h.rolloverAt = fInfo.ModTime().Unix() + h.interval + + return h, nil +} + +func (h *TimeRotatingFileHandler) doRollover() { + //refer http://hg.python.org/cpython/file/2.7/Lib/logging/handlers.py + now := time.Now() + + if h.rolloverAt <= now.Unix() { + fName := h.baseName + now.Format(h.suffix) + h.fd.Close() + e := os.Rename(h.baseName, fName) + if e != nil { + panic(e) + } + + h.fd, _ = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + + h.rolloverAt = time.Now().Unix() + h.interval + } +} + +func (h *TimeRotatingFileHandler) Write(b []byte) (n int, err error) { + h.doRollover() + return h.fd.Write(b) +} + +func (h *TimeRotatingFileHandler) Close() error { + return h.fd.Close() +} diff --git a/vendor/github.com/siddontang/go/log/handler.go b/vendor/github.com/siddontang/go/log/handler.go new file mode 100644 index 000000000000..4dc086f45c0a --- /dev/null +++ b/vendor/github.com/siddontang/go/log/handler.go @@ -0,0 +1,48 @@ +package log + +import ( + "io" +) + +//Handler writes logs to somewhere +type Handler interface { + Write(p []byte) (n int, err error) + Close() error +} + +//StreamHandler writes logs to a specified io Writer, maybe stdout, stderr, etc... +type StreamHandler struct { + w io.Writer +} + +func NewStreamHandler(w io.Writer) (*StreamHandler, error) { + h := new(StreamHandler) + + h.w = w + + return h, nil +} + +func (h *StreamHandler) Write(b []byte) (n int, err error) { + return h.w.Write(b) +} + +func (h *StreamHandler) Close() error { + return nil +} + +//NullHandler does nothing, it discards anything. +type NullHandler struct { +} + +func NewNullHandler() (*NullHandler, error) { + return new(NullHandler), nil +} + +func (h *NullHandler) Write(b []byte) (n int, err error) { + return len(b), nil +} + +func (h *NullHandler) Close() { + +} diff --git a/vendor/github.com/siddontang/go/log/log.go b/vendor/github.com/siddontang/go/log/log.go new file mode 100644 index 000000000000..371f6016871e --- /dev/null +++ b/vendor/github.com/siddontang/go/log/log.go @@ -0,0 +1,343 @@ +package log + +import ( + "fmt" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +//log level, from low to high, more high means more serious +const ( + LevelTrace = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelFatal +) + +const ( + Ltime = 1 << iota //time format "2006/01/02 15:04:05" + Lfile //file.go:123 + Llevel //[Trace|Debug|Info...] 
+) + +var LevelName [6]string = [6]string{"Trace", "Debug", "Info", "Warn", "Error", "Fatal"} + +const TimeFormat = "2006/01/02 15:04:05" + +const maxBufPoolSize = 16 + +type atomicInt32 int32 + +func (i *atomicInt32) Set(n int) { + atomic.StoreInt32((*int32)(i), int32(n)) +} + +func (i *atomicInt32) Get() int { + return int(atomic.LoadInt32((*int32)(i))) +} + +type Logger struct { + level atomicInt32 + flag int + + hMutex sync.Mutex + handler Handler + + bufMutex sync.Mutex + bufs [][]byte + + closed atomicInt32 +} + +//new a logger with specified handler and flag +func New(handler Handler, flag int) *Logger { + var l = new(Logger) + + l.level.Set(LevelInfo) + l.handler = handler + + l.flag = flag + + l.closed.Set(0) + + l.bufs = make([][]byte, 0, 16) + + return l +} + +//new a default logger with specified handler and flag: Ltime|Lfile|Llevel +func NewDefault(handler Handler) *Logger { + return New(handler, Ltime|Lfile|Llevel) +} + +func newStdHandler() *StreamHandler { + h, _ := NewStreamHandler(os.Stdout) + return h +} + +var std = NewDefault(newStdHandler()) + +func (l *Logger) popBuf() []byte { + l.bufMutex.Lock() + var buf []byte + if len(l.bufs) == 0 { + buf = make([]byte, 0, 1024) + } else { + buf = l.bufs[len(l.bufs)-1] + l.bufs = l.bufs[0 : len(l.bufs)-1] + } + l.bufMutex.Unlock() + + return buf +} + +func (l *Logger) putBuf(buf []byte) { + l.bufMutex.Lock() + if len(l.bufs) < maxBufPoolSize { + buf = buf[0:0] + l.bufs = append(l.bufs, buf) + } + l.bufMutex.Unlock() +} + +func (l *Logger) Close() { + if l.closed.Get() == 1 { + return + } + l.closed.Set(1) + + l.handler.Close() +} + +//set log level, any log level less than it will not log +func (l *Logger) SetLevel(level int) { + l.level.Set(level) +} + +// name can be in ["trace", "debug", "info", "warn", "error", "fatal"] +func (l *Logger) SetLevelByName(name string) { + name = strings.ToLower(name) + switch name { + case "trace": + l.SetLevel(LevelTrace) + case "debug": + l.SetLevel(LevelDebug) + case "info": + l.SetLevel(LevelInfo) + case "warn": + l.SetLevel(LevelWarn) + case "error": + l.SetLevel(LevelError) + case "fatal": + l.SetLevel(LevelFatal) + } +} + +func (l *Logger) SetHandler(h Handler) { + if l.closed.Get() == 1 { + return + } + + l.hMutex.Lock() + if l.handler != nil { + l.handler.Close() + } + l.handler = h + l.hMutex.Unlock() +} + +func (l *Logger) Output(callDepth int, level int, format string, v ...interface{}) { + if l.closed.Get() == 1 { + // closed + return + } + + if l.level.Get() > level { + // higher level can be logged + return + } + + var s string + if format == "" { + s = fmt.Sprint(v...) + } else { + s = fmt.Sprintf(format, v...) + } + + buf := l.popBuf() + + if l.flag&Ltime > 0 { + now := time.Now().Format(TimeFormat) + buf = append(buf, '[') + buf = append(buf, now...) + buf = append(buf, "] "...) + } + + if l.flag&Lfile > 0 { + _, file, line, ok := runtime.Caller(callDepth) + if !ok { + file = "???" + line = 0 + } else { + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + file = file[i+1:] + break + } + } + } + + buf = append(buf, file...) + buf = append(buf, ':') + + buf = strconv.AppendInt(buf, int64(line), 10) + buf = append(buf, ' ') + } + + if l.flag&Llevel > 0 { + buf = append(buf, '[') + buf = append(buf, LevelName[level]...) + buf = append(buf, "] "...) + } + + buf = append(buf, s...) 
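+
+	// note: the trailing-newline check below assumes s is non-empty; an empty
+	// message would make the index expression panic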
+ + if s[len(s)-1] != '\n' { + buf = append(buf, '\n') + } + + // l.msg <- buf + + l.hMutex.Lock() + l.handler.Write(buf) + l.hMutex.Unlock() + l.putBuf(buf) +} + +//log with Trace level +func (l *Logger) Trace(v ...interface{}) { + l.Output(2, LevelTrace, "", v...) +} + +//log with Debug level +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, LevelDebug, "", v...) +} + +//log with info level +func (l *Logger) Info(v ...interface{}) { + l.Output(2, LevelInfo, "", v...) +} + +//log with warn level +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, LevelWarn, "", v...) +} + +//log with error level +func (l *Logger) Error(v ...interface{}) { + l.Output(2, LevelError, "", v...) +} + +//log with fatal level +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, LevelFatal, "", v...) +} + +//log with Trace level +func (l *Logger) Tracef(format string, v ...interface{}) { + l.Output(2, LevelTrace, format, v...) +} + +//log with Debug level +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Output(2, LevelDebug, format, v...) +} + +//log with info level +func (l *Logger) Infof(format string, v ...interface{}) { + l.Output(2, LevelInfo, format, v...) +} + +//log with warn level +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Output(2, LevelWarn, format, v...) +} + +//log with error level +func (l *Logger) Errorf(format string, v ...interface{}) { + l.Output(2, LevelError, format, v...) +} + +//log with fatal level +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Output(2, LevelFatal, format, v...) +} + +func SetLevel(level int) { + std.SetLevel(level) +} + +// name can be in ["trace", "debug", "info", "warn", "error", "fatal"] +func SetLevelByName(name string) { + std.SetLevelByName(name) +} + +func SetHandler(h Handler) { + std.SetHandler(h) +} + +func Trace(v ...interface{}) { + std.Output(2, LevelTrace, "", v...) +} + +func Debug(v ...interface{}) { + std.Output(2, LevelDebug, "", v...) +} + +func Info(v ...interface{}) { + std.Output(2, LevelInfo, "", v...) +} + +func Warn(v ...interface{}) { + std.Output(2, LevelWarn, "", v...) +} + +func Error(v ...interface{}) { + std.Output(2, LevelError, "", v...) +} + +func Fatal(v ...interface{}) { + std.Output(2, LevelFatal, "", v...) +} + +func Tracef(format string, v ...interface{}) { + std.Output(2, LevelTrace, format, v...) +} + +func Debugf(format string, v ...interface{}) { + std.Output(2, LevelDebug, format, v...) +} + +func Infof(format string, v ...interface{}) { + std.Output(2, LevelInfo, format, v...) +} + +func Warnf(format string, v ...interface{}) { + std.Output(2, LevelWarn, format, v...) +} + +func Errorf(format string, v ...interface{}) { + std.Output(2, LevelError, format, v...) +} + +func Fatalf(format string, v ...interface{}) { + std.Output(2, LevelFatal, format, v...) +} diff --git a/vendor/github.com/siddontang/go/log/sockethandler.go b/vendor/github.com/siddontang/go/log/sockethandler.go new file mode 100644 index 000000000000..3e7494d9501b --- /dev/null +++ b/vendor/github.com/siddontang/go/log/sockethandler.go @@ -0,0 +1,65 @@ +package log + +import ( + "encoding/binary" + "net" + "time" +) + +//SocketHandler writes log to a connectionl. +//Network protocol is simple: log length + log | log length + log. log length is uint32, bigendian. +//you must implement your own log server, maybe you can use logd instead simply. 
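+//
+// Framing example (illustrative): the 5-byte message "hello" goes over the
+// wire as
+//
+//	0x00 0x00 0x00 0x05 'h' 'e' 'l' 'l' 'o'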
+type SocketHandler struct { + c net.Conn + protocol string + addr string +} + +func NewSocketHandler(protocol string, addr string) (*SocketHandler, error) { + s := new(SocketHandler) + + s.protocol = protocol + s.addr = addr + + return s, nil +} + +func (h *SocketHandler) Write(p []byte) (n int, err error) { + if err = h.connect(); err != nil { + return + } + + buf := make([]byte, len(p)+4) + + binary.BigEndian.PutUint32(buf, uint32(len(p))) + + copy(buf[4:], p) + + n, err = h.c.Write(buf) + if err != nil { + h.c.Close() + h.c = nil + } + return +} + +func (h *SocketHandler) Close() error { + if h.c != nil { + h.c.Close() + } + return nil +} + +func (h *SocketHandler) connect() error { + if h.c != nil { + return nil + } + + var err error + h.c, err = net.DialTimeout(h.protocol, h.addr, 20*time.Second) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/siddontang/go/num/bytes.go b/vendor/github.com/siddontang/go/num/bytes.go new file mode 100644 index 000000000000..1f3def74ac73 --- /dev/null +++ b/vendor/github.com/siddontang/go/num/bytes.go @@ -0,0 +1,67 @@ +package num + +import ( + "encoding/binary" +) + +//all are bigendian format + +func BytesToUint16(b []byte) uint16 { + return binary.BigEndian.Uint16(b) +} + +func Uint16ToBytes(u uint16) []byte { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, u) + return buf +} + +func BytesToUint32(b []byte) uint32 { + return binary.BigEndian.Uint32(b) +} + +func Uint32ToBytes(u uint32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, u) + return buf +} + +func BytesToUint64(b []byte) uint64 { + return binary.BigEndian.Uint64(b) +} + +func Uint64ToBytes(u uint64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, u) + return buf +} + +func BytesToInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(b)) +} + +func Int16ToBytes(u int16) []byte { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, uint16(u)) + return buf +} + +func BytesToInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func Int32ToBytes(u int32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(u)) + return buf +} + +func BytesToInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func Int64ToBytes(u int64) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, uint64(u)) + return buf +} diff --git a/vendor/github.com/siddontang/go/num/cmp.go b/vendor/github.com/siddontang/go/num/cmp.go new file mode 100644 index 000000000000..78f8d4f14af3 --- /dev/null +++ b/vendor/github.com/siddontang/go/num/cmp.go @@ -0,0 +1,161 @@ +package num + +func MinUint(a uint, b uint) uint { + if a > b { + return b + } else { + return a + } +} + +func MaxUint(a uint, b uint) uint { + if a > b { + return a + } else { + return b + } +} + +func MinInt(a int, b int) int { + if a > b { + return b + } else { + return a + } +} + +func MaxInt(a int, b int) int { + if a > b { + return a + } else { + return b + } +} + +func MinUint8(a uint8, b uint8) uint8 { + if a > b { + return b + } else { + return a + } +} + +func MaxUint8(a uint8, b uint8) uint8 { + if a > b { + return a + } else { + return b + } +} + +func MinInt8(a int8, b int8) int8 { + if a > b { + return b + } else { + return a + } +} + +func MaxInt8(a int8, b int8) int8 { + if a > b { + return a + } else { + return b + } +} + +func MinUint16(a uint16, b uint16) uint16 { + if a > b { + return b + } else { + return a + } +} + +func MaxUint16(a uint16, b uint16) uint16 { + if a > 
b { + return a + } else { + return b + } +} + +func MinInt16(a int16, b int16) int16 { + if a > b { + return b + } else { + return a + } +} + +func MaxInt16(a int16, b int16) int16 { + if a > b { + return a + } else { + return b + } +} + +func MinUint32(a uint32, b uint32) uint32 { + if a > b { + return b + } else { + return a + } +} + +func MaxUint32(a uint32, b uint32) uint32 { + if a > b { + return a + } else { + return b + } +} + +func MinInt32(a int32, b int32) int32 { + if a > b { + return b + } else { + return a + } +} + +func MaxInt32(a int32, b int32) int32 { + if a > b { + return a + } else { + return b + } +} + +func MinUint64(a uint64, b uint64) uint64 { + if a > b { + return b + } else { + return a + } +} + +func MaxUint64(a uint64, b uint64) uint64 { + if a > b { + return a + } else { + return b + } +} + +func MinInt64(a int64, b int64) int64 { + if a > b { + return b + } else { + return a + } +} + +func MaxInt64(a int64, b int64) int64 { + if a > b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/siddontang/go/num/str.go b/vendor/github.com/siddontang/go/num/str.go new file mode 100644 index 000000000000..4b304817b86d --- /dev/null +++ b/vendor/github.com/siddontang/go/num/str.go @@ -0,0 +1,157 @@ +package num + +import ( + "strconv" +) + +func ParseUint(s string) (uint, error) { + if v, err := strconv.ParseUint(s, 10, 0); err != nil { + return 0, err + } else { + return uint(v), nil + } +} + +func ParseUint8(s string) (uint8, error) { + if v, err := strconv.ParseUint(s, 10, 8); err != nil { + return 0, err + } else { + return uint8(v), nil + } +} + +func ParseUint16(s string) (uint16, error) { + if v, err := strconv.ParseUint(s, 10, 16); err != nil { + return 0, err + } else { + return uint16(v), nil + } +} + +func ParseUint32(s string) (uint32, error) { + if v, err := strconv.ParseUint(s, 10, 32); err != nil { + return 0, err + } else { + return uint32(v), nil + } +} + +func ParseUint64(s string) (uint64, error) { + return strconv.ParseUint(s, 10, 64) +} + +func ParseInt(s string) (int, error) { + if v, err := strconv.ParseInt(s, 10, 0); err != nil { + return 0, err + } else { + return int(v), nil + } +} + +func ParseInt8(s string) (int8, error) { + if v, err := strconv.ParseInt(s, 10, 8); err != nil { + return 0, err + } else { + return int8(v), nil + } +} + +func ParseInt16(s string) (int16, error) { + if v, err := strconv.ParseInt(s, 10, 16); err != nil { + return 0, err + } else { + return int16(v), nil + } +} + +func ParseInt32(s string) (int32, error) { + if v, err := strconv.ParseInt(s, 10, 32); err != nil { + return 0, err + } else { + return int32(v), nil + } +} + +func ParseInt64(s string) (int64, error) { + return strconv.ParseInt(s, 10, 64) +} + +func FormatInt(v int) string { + return strconv.FormatInt(int64(v), 10) +} + +func FormatInt8(v int8) string { + return strconv.FormatInt(int64(v), 10) +} + +func FormatInt16(v int16) string { + return strconv.FormatInt(int64(v), 10) +} + +func FormatInt32(v int32) string { + return strconv.FormatInt(int64(v), 10) +} + +func FormatInt64(v int64) string { + return strconv.FormatInt(int64(v), 10) +} + +func FormatUint(v uint) string { + return strconv.FormatUint(uint64(v), 10) +} + +func FormatUint8(v uint8) string { + return strconv.FormatUint(uint64(v), 10) +} + +func FormatUint16(v uint16) string { + return strconv.FormatUint(uint64(v), 10) +} + +func FormatUint32(v uint32) string { + return strconv.FormatUint(uint64(v), 10) +} + +func FormatUint64(v uint64) string { + return 
strconv.FormatUint(uint64(v), 10) +} + +func FormatIntToSlice(v int) []byte { + return strconv.AppendInt(nil, int64(v), 10) +} + +func FormatInt8ToSlice(v int8) []byte { + return strconv.AppendInt(nil, int64(v), 10) +} + +func FormatInt16ToSlice(v int16) []byte { + return strconv.AppendInt(nil, int64(v), 10) +} + +func FormatInt32ToSlice(v int32) []byte { + return strconv.AppendInt(nil, int64(v), 10) +} + +func FormatInt64ToSlice(v int64) []byte { + return strconv.AppendInt(nil, int64(v), 10) +} + +func FormatUintToSlice(v uint) []byte { + return strconv.AppendUint(nil, uint64(v), 10) +} + +func FormatUint8ToSlice(v uint8) []byte { + return strconv.AppendUint(nil, uint64(v), 10) +} + +func FormatUint16ToSlice(v uint16) []byte { + return strconv.AppendUint(nil, uint64(v), 10) +} + +func FormatUint32ToSlice(v uint32) []byte { + return strconv.AppendUint(nil, uint64(v), 10) +} + +func FormatUint64ToSlice(v uint64) []byte { + return strconv.AppendUint(nil, uint64(v), 10) +} diff --git a/vendor/github.com/siddontang/go/snappy/LICENSE b/vendor/github.com/siddontang/go/snappy/LICENSE new file mode 100644 index 000000000000..6050c10f4c8b --- /dev/null +++ b/vendor/github.com/siddontang/go/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/siddontang/go/snappy/decode.go b/vendor/github.com/siddontang/go/snappy/decode.go new file mode 100644 index 000000000000..d93c1b9dbfd7 --- /dev/null +++ b/vendor/github.com/siddontang/go/snappy/decode.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" +) + +// ErrCorrupt reports that the input is invalid. +var ErrCorrupt = errors.New("snappy: corrupt input") + +// DecodedLen returns the length of the decoded block. 
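+// The header is a uvarint: for example, a 300-byte block starts with the
+// two bytes 0xAC, 0x02 (illustrative note).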
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n == 0 { + return 0, 0, ErrCorrupt + } + if uint64(int(v)) != v { + return 0, 0, errors.New("snappy: decoded block is too large") + } + return int(v), n, nil +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if len(dst) < dLen { + dst = make([]byte, dLen) + } + + var d, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint(src[s] >> 2) + switch { + case x < 60: + s += 1 + case x == 60: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-1]) + case x == 61: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-2]) | uint(src[s-1])<<8 + case x == 62: + s += 4 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 + case x == 63: + s += 5 + if s > len(src) { + return nil, ErrCorrupt + } + x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 + } + length = int(x + 1) + if length <= 0 { + return nil, errors.New("snappy: unsupported literal length") + } + if length > len(dst)-d || length > len(src)-s { + return nil, ErrCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if s > len(src) { + return nil, ErrCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) + + case tagCopy2: + s += 3 + if s > len(src) { + return nil, ErrCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(src[s-2]) | int(src[s-1])<<8 + + case tagCopy4: + return nil, errors.New("snappy: unsupported COPY_4 tag") + } + + end := d + length + if offset > d || end > len(dst) { + return nil, ErrCorrupt + } + for ; d < end; d++ { + dst[d] = dst[d-offset] + } + } + if d != dLen { + return nil, ErrCorrupt + } + return dst[:d], nil +} diff --git a/vendor/github.com/siddontang/go/snappy/encode.go b/vendor/github.com/siddontang/go/snappy/encode.go new file mode 100644 index 000000000000..b2371db11c8f --- /dev/null +++ b/vendor/github.com/siddontang/go/snappy/encode.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" +) + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
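+// For example (illustrative), a 10-byte literal is emitted as the single tag
+// byte 0x24 (9<<2 | tagLiteral) followed by the 10 literal bytes.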
+func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + case n < 1<<16: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + case n < 1<<24: + dst[0] = 62<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + i = 4 + case int64(n) < 1<<32: + dst[0] = 63<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + dst[3] = uint8(n >> 16) + dst[4] = uint8(n >> 24) + i = 5 + default: + panic("snappy: source buffer is too long") + } + if copy(dst[i:], lit) != len(lit) { + panic("snappy: destination buffer is too short") + } + return i + len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +func emitCopy(dst []byte, offset, length int) int { + i := 0 + for length > 0 { + x := length - 4 + if 0 <= x && x < 1<<3 && offset < 1<<11 { + dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + i += 2 + break + } + + x = length + if x > 1<<6 { + x = 1 << 6 + } + dst[i+0] = uint8(x-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= x + } + return i +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// It is valid to pass a nil dst. +func Encode(dst, src []byte) ([]byte, error) { + if n := MaxEncodedLen(len(src)); len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + // Return early if src is short. + if len(src) <= 4 { + if len(src) != 0 { + d += emitLiteral(dst[d:], src) + } + return dst[:d], nil + } + + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + const maxTableSize = 1 << 14 + shift, tableSize := uint(32-8), 1<<8 + for tableSize < maxTableSize && tableSize < len(src) { + shift-- + tableSize *= 2 + } + var table [maxTableSize]int + + // Iterate over the source bytes. + var ( + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for s+3 < len(src) { + // Update the hash table. + b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] + h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 + p := &table[(h*0x1e35a7bd)>>shift] + // We need to to store values in [-1, inf) in table. To save + // some initialization time, (re)use the table's zero value + // and shift the values against this zero: add 1 on writes, + // subtract 1 on reads. + t, *p = *p-1, s+1 + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { + s++ + continue + } + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + d += emitLiteral(dst[d:], src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s, t = s+4, t+4 + for s < len(src) && src[s] == src[t] { + s++ + t++ + } + // Emit the copied bytes. + d += emitCopy(dst[d:], s-t, s-s0) + lit = s + } + + // Emit any final pending literal bytes and return. 
+ if lit != len(src) { + d += emitLiteral(dst[d:], src[lit:]) + } + return dst[:d], nil +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +func MaxEncodedLen(srcLen int) int { + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + return 32 + srcLen + srcLen/6 +} diff --git a/vendor/github.com/siddontang/go/snappy/snappy.go b/vendor/github.com/siddontang/go/snappy/snappy.go new file mode 100644 index 000000000000..2f1b790d0b71 --- /dev/null +++ b/vendor/github.com/siddontang/go/snappy/snappy.go @@ -0,0 +1,38 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at http://code.google.com/p/snappy/ +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer supported. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) diff --git a/vendor/github.com/siddontang/go/sync2/atomic.go b/vendor/github.com/siddontang/go/sync2/atomic.go new file mode 100644 index 000000000000..382fc20dfec7 --- /dev/null +++ b/vendor/github.com/siddontang/go/sync2/atomic.go @@ -0,0 +1,146 @@ +// Copyright 2013, Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sync2 + +import ( + "sync" + "sync/atomic" + "time" +) + +type AtomicInt32 int32 + +func (i *AtomicInt32) Add(n int32) int32 { + return atomic.AddInt32((*int32)(i), n) +} + +func (i *AtomicInt32) Set(n int32) { + atomic.StoreInt32((*int32)(i), n) +} + +func (i *AtomicInt32) Get() int32 { + return atomic.LoadInt32((*int32)(i)) +} + +func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) { + return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval) +} + +type AtomicUint32 uint32 + +func (i *AtomicUint32) Add(n uint32) uint32 { + return atomic.AddUint32((*uint32)(i), n) +} + +func (i *AtomicUint32) Set(n uint32) { + atomic.StoreUint32((*uint32)(i), n) +} + +func (i *AtomicUint32) Get() uint32 { + return atomic.LoadUint32((*uint32)(i)) +} + +func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) { + return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval) +} + +type AtomicInt64 int64 + +func (i *AtomicInt64) Add(n int64) int64 { + return atomic.AddInt64((*int64)(i), n) +} + +func (i *AtomicInt64) Set(n int64) { + atomic.StoreInt64((*int64)(i), n) +} + +func (i *AtomicInt64) Get() int64 { + return atomic.LoadInt64((*int64)(i)) +} + +func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) { + return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval) +} + +type AtomicUint64 uint64 + +func (i *AtomicUint64) Add(n uint64) uint64 { + return atomic.AddUint64((*uint64)(i), n) +} + +func (i *AtomicUint64) Set(n uint64) { + atomic.StoreUint64((*uint64)(i), n) +} + +func (i *AtomicUint64) Get() uint64 { + return atomic.LoadUint64((*uint64)(i)) +} + +func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) { + return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval) +} + +type AtomicDuration int64 + +func (d *AtomicDuration) Add(duration time.Duration) time.Duration { + return time.Duration(atomic.AddInt64((*int64)(d), int64(duration))) +} + +func (d *AtomicDuration) Set(duration time.Duration) { + atomic.StoreInt64((*int64)(d), int64(duration)) +} + +func (d *AtomicDuration) Get() time.Duration { + return time.Duration(atomic.LoadInt64((*int64)(d))) +} + +func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) { + return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval)) +} + +// AtomicString gives you atomic-style APIs for string, but +// it's only a convenience wrapper that uses a mutex. So, it's +// not as efficient as the rest of the atomic types. 
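+//
+// Usage sketch (illustrative):
+//
+//	var s AtomicString
+//	s.Set("ready")
+//	swapped := s.CompareAndSwap("ready", "running")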
+type AtomicString struct { + mu sync.Mutex + str string +} + +func (s *AtomicString) Set(str string) { + s.mu.Lock() + s.str = str + s.mu.Unlock() +} + +func (s *AtomicString) Get() string { + s.mu.Lock() + str := s.str + s.mu.Unlock() + return str +} + +func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) { + s.mu.Lock() + defer s.mu.Unlock() + if s.str == oldval { + s.str = newval + return true + } + return false +} + +type AtomicBool int32 + +func (b *AtomicBool) Set(v bool) { + if v { + atomic.StoreInt32((*int32)(b), 1) + } else { + atomic.StoreInt32((*int32)(b), 0) + } +} + +func (b *AtomicBool) Get() bool { + return atomic.LoadInt32((*int32)(b)) == 1 +} diff --git a/vendor/github.com/siddontang/go/sync2/semaphore.go b/vendor/github.com/siddontang/go/sync2/semaphore.go new file mode 100644 index 000000000000..d310da7294c7 --- /dev/null +++ b/vendor/github.com/siddontang/go/sync2/semaphore.go @@ -0,0 +1,65 @@ +package sync2 + +import ( + "sync" + "sync/atomic" + "time" +) + +func NewSemaphore(initialCount int) *Semaphore { + res := &Semaphore{ + counter: int64(initialCount), + } + res.cond.L = &res.lock + return res +} + +type Semaphore struct { + lock sync.Mutex + cond sync.Cond + counter int64 +} + +func (s *Semaphore) Release() { + s.lock.Lock() + s.counter += 1 + if s.counter >= 0 { + s.cond.Signal() + } + s.lock.Unlock() +} + +func (s *Semaphore) Acquire() { + s.lock.Lock() + for s.counter < 1 { + s.cond.Wait() + } + s.counter -= 1 + s.lock.Unlock() +} + +func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool { + done := make(chan bool, 1) + // Gate used to communicate between the threads and decide what the result + // is. If the main thread decides, we have timed out, otherwise we succeed. + decided := new(int32) + go func() { + s.Acquire() + if atomic.SwapInt32(decided, 1) == 0 { + done <- true + } else { + // If we already decided the result, and this thread did not win + s.Release() + } + }() + select { + case <-done: + return true + case <-time.NewTimer(timeout).C: + if atomic.SwapInt32(decided, 1) == 1 { + // The other thread already decided the result + return true + } + return false + } +} diff --git a/vendor/github.com/siddontang/ledisdb/LICENSE b/vendor/github.com/siddontang/ledisdb/LICENSE new file mode 100644 index 000000000000..7ece9fdf5a64 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 siddontang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/config/config.go b/vendor/github.com/siddontang/ledisdb/config/config.go new file mode 100644 index 000000000000..f8aa63940146 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/config/config.go @@ -0,0 +1,315 @@ +package config + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "sync" + + "fmt" + + "github.com/pelletier/go-toml" + "github.com/siddontang/go/ioutil2" +) + +var ( + ErrNoConfigFile = errors.New("Running without a config file") +) + +const ( + DefaultAddr string = "127.0.0.1:6380" + + DefaultDBName string = "goleveldb" + + DefaultDataDir string = "./var" + + KB int = 1024 + MB int = KB * 1024 + GB int = MB * 1024 +) + +type LevelDBConfig struct { + Compression bool `toml:"compression"` + BlockSize int `toml:"block_size"` + WriteBufferSize int `toml:"write_buffer_size"` + CacheSize int `toml:"cache_size"` + MaxOpenFiles int `toml:"max_open_files"` + MaxFileSize int `toml:"max_file_size"` +} + +type RocksDBConfig struct { + Compression int `toml:"compression"` + BlockSize int `toml:"block_size"` + WriteBufferSize int `toml:"write_buffer_size"` + CacheSize int `toml:"cache_size"` + MaxOpenFiles int `toml:"max_open_files"` + MaxWriteBufferNum int `toml:"max_write_buffer_num"` + MinWriteBufferNumberToMerge int `toml:"min_write_buffer_number_to_merge"` + NumLevels int `toml:"num_levels"` + Level0FileNumCompactionTrigger int `toml:"level0_file_num_compaction_trigger"` + Level0SlowdownWritesTrigger int `toml:"level0_slowdown_writes_trigger"` + Level0StopWritesTrigger int `toml:"level0_stop_writes_trigger"` + TargetFileSizeBase int `toml:"target_file_size_base"` + TargetFileSizeMultiplier int `toml:"target_file_size_multiplier"` + MaxBytesForLevelBase int `toml:"max_bytes_for_level_base"` + MaxBytesForLevelMultiplier int `toml:"max_bytes_for_level_multiplier"` + DisableAutoCompactions bool `toml:"disable_auto_compactions"` + UseFsync bool `toml:"use_fsync"` + MaxBackgroundCompactions int `toml:"max_background_compactions"` + MaxBackgroundFlushes int `toml:"max_background_flushes"` + EnableStatistics bool `toml:"enable_statistics"` + StatsDumpPeriodSec int `toml:"stats_dump_period_sec"` + BackgroundThreads int `toml:"background_theads"` + HighPriorityBackgroundThreads int `toml:"high_priority_background_threads"` + DisableWAL bool `toml:"disable_wal"` + MaxManifestFileSize int `toml:"max_manifest_file_size"` +} + +type LMDBConfig struct { + MapSize int `toml:"map_size"` + NoSync bool `toml:"nosync"` +} + +type ReplicationConfig struct { + Path string `toml:"path"` + Sync bool `toml:"sync"` + WaitSyncTime int `toml:"wait_sync_time"` + WaitMaxSlaveAcks int `toml:"wait_max_slave_acks"` + ExpiredLogDays int `toml:"expired_log_days"` + StoreName string `toml:"store_name"` + MaxLogFileSize int64 `toml:"max_log_file_size"` + MaxLogFileNum int `toml:"max_log_file_num"` + SyncLog int `toml:"sync_log"` + Compression bool `toml:"compression"` + UseMmap bool `toml:"use_mmap"` +} + +type SnapshotConfig struct { + Path string `toml:"path"` + MaxNum int `toml:"max_num"` +} + +type TLS struct { + Enabled bool `toml:"enabled"` + Certificate string `toml:"certificate"` + Key string `toml:"key"` +} + +type AuthMethod func(c *Config, password string) bool + +type Config struct { + m sync.RWMutex `toml:"-"` + + AuthPassword string `toml:"auth_password"` + + //AuthMethod custom authentication method + AuthMethod AuthMethod `toml:"-"` + + FileName string `toml:"-"` + + // Addr can be empty to assign a local address 
dynamically + Addr string `toml:"addr"` + + AddrUnixSocketPerm string `toml:"addr_unixsocketperm"` + + HttpAddr string `toml:"http_addr"` + + SlaveOf string `toml:"slaveof"` + + Readonly bool `toml:readonly` + + DataDir string `toml:"data_dir"` + + Databases int `toml:"databases"` + + DBName string `toml:"db_name"` + DBPath string `toml:"db_path"` + DBSyncCommit int `toml:"db_sync_commit"` + + LevelDB LevelDBConfig `toml:"leveldb"` + RocksDB RocksDBConfig `toml:"rocksdb"` + + LMDB LMDBConfig `toml:"lmdb"` + + AccessLog string `toml:"access_log"` + + UseReplication bool `toml:"use_replication"` + Replication ReplicationConfig `toml:"replication"` + + Snapshot SnapshotConfig `toml:"snapshot"` + + ConnReadBufferSize int `toml:"conn_read_buffer_size"` + ConnWriteBufferSize int `toml:"conn_write_buffer_size"` + ConnKeepaliveInterval int `toml:"conn_keepalive_interval"` + + TTLCheckInterval int `toml:"ttl_check_interval"` + + //tls config + TLS TLS `toml:"tls"` +} + +func NewConfigWithFile(fileName string) (*Config, error) { + data, err := ioutil.ReadFile(fileName) + if err != nil { + return nil, err + } + + cfg, err := NewConfigWithData(data) + if err != nil { + return nil, err + } + + cfg.FileName = fileName + return cfg, nil +} + +func NewConfigWithData(data []byte) (*Config, error) { + cfg := NewConfigDefault() + + if err := toml.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("newConfigwithData: unmarashal: %s", err) + } + + cfg.adjust() + + return cfg, nil +} + +func NewConfigDefault() *Config { + cfg := new(Config) + + cfg.Addr = DefaultAddr + cfg.HttpAddr = "" + + cfg.DataDir = DefaultDataDir + + cfg.DBName = DefaultDBName + + cfg.SlaveOf = "" + cfg.Readonly = false + + // Disable Auth by default, by setting password to blank + cfg.AuthPassword = "" + + // default databases number + cfg.Databases = 16 + + // disable access log + cfg.AccessLog = "" + + cfg.LMDB.MapSize = 20 * MB + cfg.LMDB.NoSync = true + + cfg.UseReplication = false + cfg.Replication.WaitSyncTime = 500 + cfg.Replication.Compression = true + cfg.Replication.WaitMaxSlaveAcks = 2 + cfg.Replication.SyncLog = 0 + cfg.Replication.UseMmap = true + cfg.Snapshot.MaxNum = 1 + + cfg.RocksDB.EnableStatistics = false + cfg.RocksDB.UseFsync = false + cfg.RocksDB.DisableAutoCompactions = false + cfg.RocksDB.DisableWAL = false + + cfg.adjust() + + return cfg +} + +func getDefault(d int, s int) int { + if s <= 0 { + return d + } + + return s +} + +func (cfg *Config) adjust() { + cfg.LevelDB.adjust() + + cfg.RocksDB.adjust() + + cfg.Replication.ExpiredLogDays = getDefault(7, cfg.Replication.ExpiredLogDays) + cfg.Replication.MaxLogFileNum = getDefault(50, cfg.Replication.MaxLogFileNum) + cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize) + cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize) + cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval) + cfg.Databases = getDefault(16, cfg.Databases) +} + +func (cfg *LevelDBConfig) adjust() { + cfg.CacheSize = getDefault(4*MB, cfg.CacheSize) + cfg.BlockSize = getDefault(4*KB, cfg.BlockSize) + cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize) + cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles) + cfg.MaxFileSize = getDefault(32*MB, cfg.MaxFileSize) +} + +func (cfg *RocksDBConfig) adjust() { + cfg.CacheSize = getDefault(4*MB, cfg.CacheSize) + cfg.BlockSize = getDefault(4*KB, cfg.BlockSize) + cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize) + cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles) + cfg.MaxWriteBufferNum = 
getDefault(2, cfg.MaxWriteBufferNum) + cfg.MinWriteBufferNumberToMerge = getDefault(1, cfg.MinWriteBufferNumberToMerge) + cfg.NumLevels = getDefault(7, cfg.NumLevels) + cfg.Level0FileNumCompactionTrigger = getDefault(4, cfg.Level0FileNumCompactionTrigger) + cfg.Level0SlowdownWritesTrigger = getDefault(16, cfg.Level0SlowdownWritesTrigger) + cfg.Level0StopWritesTrigger = getDefault(64, cfg.Level0StopWritesTrigger) + cfg.TargetFileSizeBase = getDefault(32*MB, cfg.TargetFileSizeBase) + cfg.TargetFileSizeMultiplier = getDefault(1, cfg.TargetFileSizeMultiplier) + cfg.MaxBytesForLevelBase = getDefault(32*MB, cfg.MaxBytesForLevelBase) + cfg.MaxBytesForLevelMultiplier = getDefault(1, cfg.MaxBytesForLevelMultiplier) + cfg.MaxBackgroundCompactions = getDefault(1, cfg.MaxBackgroundCompactions) + cfg.MaxBackgroundFlushes = getDefault(1, cfg.MaxBackgroundFlushes) + cfg.StatsDumpPeriodSec = getDefault(3600, cfg.StatsDumpPeriodSec) + cfg.BackgroundThreads = getDefault(2, cfg.BackgroundThreads) + cfg.HighPriorityBackgroundThreads = getDefault(1, cfg.HighPriorityBackgroundThreads) + cfg.MaxManifestFileSize = getDefault(20*MB, cfg.MaxManifestFileSize) +} + +func (cfg *Config) Dump(w io.Writer) error { + data, err := toml.Marshal(*cfg) + if err != nil { + return err + } + if _, err := w.Write(data); err != nil { + return err + } + + return nil +} + +func (cfg *Config) DumpFile(fileName string) error { + var b bytes.Buffer + + if err := cfg.Dump(&b); err != nil { + return err + } + + return ioutil2.WriteFileAtomic(fileName, b.Bytes(), 0644) +} + +func (cfg *Config) Rewrite() error { + if len(cfg.FileName) == 0 { + return ErrNoConfigFile + } + + return cfg.DumpFile(cfg.FileName) +} + +func (cfg *Config) GetReadonly() bool { + cfg.m.RLock() + b := cfg.Readonly + cfg.m.RUnlock() + return b +} + +func (cfg *Config) SetReadonly(b bool) { + cfg.m.Lock() + cfg.Readonly = b + cfg.m.Unlock() +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/batch.go b/vendor/github.com/siddontang/ledisdb/ledis/batch.go new file mode 100644 index 000000000000..6800dfe898f0 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/batch.go @@ -0,0 +1,139 @@ +package ledis + +import ( + "sync" + + "github.com/siddontang/go/log" + "github.com/siddontang/ledisdb/rpl" + "github.com/siddontang/ledisdb/store" +) + +type batch struct { + l *Ledis + + *store.WriteBatch + + sync.Locker + + // tx *Tx +} + +func (b *batch) Commit() error { + if b.l.cfg.GetReadonly() { + return ErrWriteInROnly + } + + return b.l.handleCommit(b.WriteBatch, b.WriteBatch) + + // if b.tx == nil { + // return b.l.handleCommit(b.WriteBatch, b.WriteBatch) + // } else { + // if b.l.r != nil { + // if err := b.tx.data.Append(b.WriteBatch.BatchData()); err != nil { + // return err + // } + // } + // return b.WriteBatch.Commit() + // } +} + +func (b *batch) Lock() { + b.Locker.Lock() +} + +func (b *batch) Unlock() { + b.WriteBatch.Rollback() + b.Locker.Unlock() +} + +func (b *batch) Put(key []byte, value []byte) { + b.WriteBatch.Put(key, value) +} + +func (b *batch) Delete(key []byte) { + b.WriteBatch.Delete(key) +} + +type dbBatchLocker struct { + l *sync.Mutex + wrLock *sync.RWMutex +} + +func (l *dbBatchLocker) Lock() { + l.wrLock.RLock() + l.l.Lock() +} + +func (l *dbBatchLocker) Unlock() { + l.l.Unlock() + l.wrLock.RUnlock() +} + +// type txBatchLocker struct { +// } + +// func (l *txBatchLocker) Lock() {} +// func (l *txBatchLocker) Unlock() {} + +// type multiBatchLocker struct { +// } + +// func (l *multiBatchLocker) Lock() {} +// func (l 
*multiBatchLocker) Unlock() {} + +func (l *Ledis) newBatch(wb *store.WriteBatch, locker sync.Locker) *batch { + b := new(batch) + b.l = l + b.WriteBatch = wb + + b.Locker = locker + + return b +} + +type commiter interface { + Commit() error +} + +type commitDataGetter interface { + Data() []byte +} + +func (l *Ledis) handleCommit(g commitDataGetter, c commiter) error { + l.commitLock.Lock() + + var err error + if l.r != nil { + var rl *rpl.Log + if rl, err = l.r.Log(g.Data()); err != nil { + l.commitLock.Unlock() + + log.Fatalf("write wal error %s", err.Error()) + return err + } + + l.propagate(rl) + + if err = c.Commit(); err != nil { + l.commitLock.Unlock() + + log.Fatalf("commit error %s", err.Error()) + l.noticeReplication() + return err + } + + if err = l.r.UpdateCommitID(rl.ID); err != nil { + l.commitLock.Unlock() + + log.Fatalf("update commit id error %s", err.Error()) + l.noticeReplication() + return err + } + } else { + err = c.Commit() + } + + l.commitLock.Unlock() + + return err +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/const.go b/vendor/github.com/siddontang/ledisdb/ledis/const.go new file mode 100644 index 000000000000..d35ca3b52232 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/const.go @@ -0,0 +1,144 @@ +package ledis + +import ( + "errors" +) + +const Version = "0.5" + +type DataType byte + +// for out use +const ( + KV DataType = iota + LIST + HASH + SET + ZSET +) + +func (d DataType) String() string { + switch d { + case KV: + return KVName + case LIST: + return ListName + case HASH: + return HashName + case SET: + return SetName + case ZSET: + return ZSetName + default: + return "unknown" + } +} + +const ( + KVName = "KV" + ListName = "LIST" + HashName = "HASH" + SetName = "SET" + ZSetName = "ZSET" +) + +// for backend store +const ( + NoneType byte = 0 + KVType byte = 1 + HashType byte = 2 + HSizeType byte = 3 + ListType byte = 4 + LMetaType byte = 5 + ZSetType byte = 6 + ZSizeType byte = 7 + ZScoreType byte = 8 + // BitType byte = 9 + // BitMetaType byte = 10 + SetType byte = 11 + SSizeType byte = 12 + + maxDataType byte = 100 + + /* + I make a big mistake about TTL time key format and have to use a new one (change 101 to 103). + You must run the ledis-upgrade-ttl to upgrade db. 
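+
+	    In practice that means TTL index entries written under the old type
+	    byte 101 (kept below as ObsoleteExpTimeType, presumably so the
+	    upgrade tool can still find them) are no longer read; only entries
+	    under 103 (ExpTimeType) are, so TTLs set before the upgrade are not
+	    honored until ledis-upgrade-ttl has rewritten them.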
+ */ + ObsoleteExpTimeType byte = 101 + ExpMetaType byte = 102 + ExpTimeType byte = 103 + + MetaType byte = 201 +) + +var ( + TypeName = map[byte]string{ + KVType: "kv", + HashType: "hash", + HSizeType: "hsize", + ListType: "list", + LMetaType: "lmeta", + ZSetType: "zset", + ZSizeType: "zsize", + ZScoreType: "zscore", + // BitType: "bit", + // BitMetaType: "bitmeta", + SetType: "set", + SSizeType: "ssize", + ExpTimeType: "exptime", + ExpMetaType: "expmeta", + } +) + +const ( + defaultScanCount int = 10 +) + +var ( + errKeySize = errors.New("invalid key size") + errValueSize = errors.New("invalid value size") + errHashFieldSize = errors.New("invalid hash field size") + errSetMemberSize = errors.New("invalid set member size") + errZSetMemberSize = errors.New("invalid zset member size") + errExpireValue = errors.New("invalid expire value") + errListIndex = errors.New("invalid list index") +) + +const ( + MaxDatabases int = 10240 + + //max key size + MaxKeySize int = 1024 + + //max hash field size + MaxHashFieldSize int = 1024 + + //max zset member size + MaxZSetMemberSize int = 1024 + + //max set member size + MaxSetMemberSize int = 1024 + + //max value size + MaxValueSize int = 1024 * 1024 * 1024 +) + +var ( + ErrScoreMiss = errors.New("zset score miss") + ErrWriteInROnly = errors.New("write not support in readonly mode") + ErrRplInRDWR = errors.New("replication not support in read write mode") + ErrRplNotSupport = errors.New("replication not support") +) + +// const ( +// DBAutoCommit uint8 = 0x0 +// DBInTransaction uint8 = 0x1 +// DBInMulti uint8 = 0x2 +// ) + +const ( + BitAND = "and" + BitOR = "or" + BitXOR = "xor" + BitNot = "not" +) diff --git a/vendor/github.com/siddontang/ledisdb/ledis/doc.go b/vendor/github.com/siddontang/ledisdb/ledis/doc.go new file mode 100644 index 000000000000..c6bfe7807bef --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/doc.go @@ -0,0 +1,58 @@ +// Package ledis is a high performance embedded NoSQL. +// +// Ledis supports various data structure like kv, list, hash and zset like redis. +// +// Other features include replication, data with a limited time-to-live. +// +// Usage +// +// First create a ledis instance before use: +// +// l := ledis.Open(cfg) +// +// cfg is a Config instance which contains configuration for ledis use, +// like DataDir (root directory for ledis working to store data). +// +// After you create a ledis instance, you can select a DB to store you data: +// +// db, _ := l.Select(0) +// +// DB must be selected by a index, ledis supports only 16 databases, so the index range is [0-15]. +// +// KV +// +// KV is the most basic ledis type like any other key-value database. +// +// err := db.Set(key, value) +// value, err := db.Get(key) +// +// List +// +// List is simply lists of values, sorted by insertion order. +// You can push or pop value on the list head (left) or tail (right). +// +// err := db.LPush(key, value1) +// err := db.RPush(key, value2) +// value1, err := db.LPop(key) +// value2, err := db.RPop(key) +// +// Hash +// +// Hash is a map between fields and values. +// +// n, err := db.HSet(key, field1, value1) +// n, err := db.HSet(key, field2, value2) +// value1, err := db.HGet(key, field1) +// value2, err := db.HGet(key, field2) +// +// ZSet +// +// ZSet is a sorted collections of values. +// Every member of zset is associated with score, a int64 value which used to sort, from smallest to greatest score. +// Members are unique, but score may be same. 
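+//
+// Unlike redis, the score is an int64 rather than a double; see the
+// compatibility notes in migrate.go.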
+// +// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) +// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) +// +// +package ledis diff --git a/vendor/github.com/siddontang/ledisdb/ledis/dump.go b/vendor/github.com/siddontang/ledisdb/ledis/dump.go new file mode 100644 index 000000000000..3e01ec2a5550 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/dump.go @@ -0,0 +1,223 @@ +package ledis + +import ( + "bufio" + "bytes" + "encoding/binary" + "io" + "os" + + "github.com/siddontang/go/snappy" + "github.com/siddontang/ledisdb/store" +) + +type DumpHead struct { + CommitID uint64 +} + +func (h *DumpHead) Read(r io.Reader) error { + if err := binary.Read(r, binary.BigEndian, &h.CommitID); err != nil { + return err + } + + return nil +} + +func (h *DumpHead) Write(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, h.CommitID); err != nil { + return err + } + + return nil +} + +func (l *Ledis) DumpFile(path string) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + + return l.Dump(f) +} + +func (l *Ledis) Dump(w io.Writer) error { + var err error + + var commitID uint64 + var snap *store.Snapshot + + l.wLock.Lock() + + if l.r != nil { + if commitID, err = l.r.LastCommitID(); err != nil { + l.wLock.Unlock() + return err + } + } + + if snap, err = l.ldb.NewSnapshot(); err != nil { + l.wLock.Unlock() + return err + } + defer snap.Close() + + l.wLock.Unlock() + + wb := bufio.NewWriterSize(w, 4096) + + h := &DumpHead{commitID} + + if err = h.Write(wb); err != nil { + return err + } + + it := snap.NewIterator() + defer it.Close() + it.SeekToFirst() + + compressBuf := make([]byte, 4096) + + var key []byte + var value []byte + for ; it.Valid(); it.Next() { + key = it.RawKey() + value = it.RawValue() + + if key, err = snappy.Encode(compressBuf, key); err != nil { + return err + } + + if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil { + return err + } + + if _, err = wb.Write(key); err != nil { + return err + } + + if value, err = snappy.Encode(compressBuf, value); err != nil { + return err + } + + if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil { + return err + } + + if _, err = wb.Write(value); err != nil { + return err + } + } + + if err = wb.Flush(); err != nil { + return err + } + + compressBuf = nil + + return nil +} + +// clear all data and load dump file to db +func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + return l.LoadDump(f) +} + +// clear all data and load dump file to db +func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) { + l.wLock.Lock() + defer l.wLock.Unlock() + + var err error + if err = l.flushAll(); err != nil { + return nil, err + } + + rb := bufio.NewReaderSize(r, 4096) + + h := new(DumpHead) + + if err = h.Read(rb); err != nil { + return nil, err + } + + var keyLen uint16 + var valueLen uint32 + + var keyBuf bytes.Buffer + var valueBuf bytes.Buffer + + deKeyBuf := make([]byte, 4096) + deValueBuf := make([]byte, 4096) + + var key, value []byte + + wb := l.ldb.NewWriteBatch() + defer wb.Close() + + n := 0 + + for { + if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF { + return nil, err + } else if err == io.EOF { + break + } + + if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil { + return nil, err + } + + if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil { 
+ return nil, err + } + + if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil { + return nil, err + } + + if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil { + return nil, err + } + + if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil { + return nil, err + } + + wb.Put(key, value) + n++ + if n%1024 == 0 { + if err = wb.Commit(); err != nil { + return nil, err + } + } + + // if err = l.ldb.Put(key, value); err != nil { + // return nil, err + // } + + keyBuf.Reset() + valueBuf.Reset() + } + + if err = wb.Commit(); err != nil { + return nil, err + } + + deKeyBuf = nil + deValueBuf = nil + + if l.r != nil { + if err := l.r.UpdateCommitID(h.CommitID); err != nil { + return nil, err + } + } + + return h, nil +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/event.go b/vendor/github.com/siddontang/ledisdb/ledis/event.go new file mode 100644 index 000000000000..d14309def4fe --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/event.go @@ -0,0 +1,126 @@ +package ledis + +import ( + "errors" + "fmt" + "strconv" + + "github.com/siddontang/go/hack" +) + +var errInvalidEvent = errors.New("invalid event") + +func formatEventKey(buf []byte, k []byte) ([]byte, error) { + if len(k) < 2 { + return nil, errInvalidEvent + } + + buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...) + buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...) + + db := new(DB) + index, _, err := decodeDBIndex(k) + if err != nil { + return nil, err + } + db.setIndex(index) + + //to do format at respective place + + switch k[1] { + case KVType: + if key, err := db.decodeKVKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + } + case HashType: + if key, field, err := db.hDecodeHashKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(field)) + } + case HSizeType: + if key, err := db.hDecodeSizeKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + } + case ListType: + if key, seq, err := db.lDecodeListKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, int64(seq), 10) + } + case LMetaType: + if key, err := db.lDecodeMetaKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + } + case ZSetType: + if key, m, err := db.zDecodeSetKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(m)) + } + case ZSizeType: + if key, err := db.zDecodeSizeKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + } + case ZScoreType: + if key, m, score, err := db.zDecodeScoreKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(m)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, score, 10) + } + case SetType: + if key, member, err := db.sDecodeSetKey(k); err != nil { + return nil, err + } else { + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(member)) + } + case SSizeType: + if key, err := db.sDecodeSizeKey(k); err != nil { + return nil, err + } else { + buf = 
strconv.AppendQuote(buf, hack.String(key)) + } + case ExpTimeType: + if tp, key, t, err := db.expDecodeTimeKey(k); err != nil { + return nil, err + } else { + buf = append(buf, TypeName[tp]...) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(key)) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, t, 10) + } + case ExpMetaType: + if tp, key, err := db.expDecodeMetaKey(k); err != nil { + return nil, err + } else { + buf = append(buf, TypeName[tp]...) + buf = append(buf, ' ') + buf = strconv.AppendQuote(buf, hack.String(key)) + } + default: + return nil, errInvalidEvent + } + + return buf, nil +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/ledis.go b/vendor/github.com/siddontang/ledisdb/ledis/ledis.go new file mode 100644 index 000000000000..8d654ba4c891 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/ledis.go @@ -0,0 +1,241 @@ +package ledis + +import ( + "fmt" + "io" + "os" + "path" + "sync" + "time" + + "github.com/siddontang/go/filelock" + "github.com/siddontang/go/log" + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/rpl" + "github.com/siddontang/ledisdb/store" +) + +type Ledis struct { + cfg *config.Config + + ldb *store.DB + + dbLock sync.Mutex + dbs map[int]*DB + + quit chan struct{} + wg sync.WaitGroup + + //for replication + r *rpl.Replication + rc chan struct{} + rbatch *store.WriteBatch + rDoneCh chan struct{} + rhs []NewLogEventHandler + + wLock sync.RWMutex //allow one write at same time + commitLock sync.Mutex //allow one write commit at same time + + lock io.Closer + + ttlCheckers []*ttlChecker + ttlCheckerCh chan *ttlChecker +} + +func Open(cfg *config.Config) (*Ledis, error) { + if len(cfg.DataDir) == 0 { + cfg.DataDir = config.DefaultDataDir + } + + if cfg.Databases == 0 { + cfg.Databases = 16 + } else if cfg.Databases > MaxDatabases { + cfg.Databases = MaxDatabases + } + + os.MkdirAll(cfg.DataDir, 0755) + + var err error + + l := new(Ledis) + l.cfg = cfg + + if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil { + return nil, err + } + + l.quit = make(chan struct{}) + + if l.ldb, err = store.Open(cfg); err != nil { + return nil, err + } + + if cfg.UseReplication { + if l.r, err = rpl.NewReplication(cfg); err != nil { + return nil, err + } + + l.rc = make(chan struct{}, 1) + l.rbatch = l.ldb.NewWriteBatch() + l.rDoneCh = make(chan struct{}, 1) + + l.wg.Add(1) + go l.onReplication() + + //first we must try wait all replication ok + //maybe some logs are not committed + l.WaitReplication() + } else { + l.r = nil + } + + l.dbs = make(map[int]*DB, 16) + + l.checkTTL() + + return l, nil +} + +func (l *Ledis) Close() { + close(l.quit) + l.wg.Wait() + + l.ldb.Close() + + if l.r != nil { + l.r.Close() + //l.r = nil + } + + if l.lock != nil { + l.lock.Close() + //l.lock = nil + } +} + +func (l *Ledis) Select(index int) (*DB, error) { + if index < 0 || index >= l.cfg.Databases { + return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1) + } + + l.dbLock.Lock() + defer l.dbLock.Unlock() + + db, ok := l.dbs[index] + if ok { + return db, nil + } + + db = l.newDB(index) + l.dbs[index] = db + + go func(db *DB) { + l.ttlCheckerCh <- db.ttlChecker + }(db) + + return db, nil +} + +// Flush All will clear all data and replication logs +func (l *Ledis) FlushAll() error { + l.wLock.Lock() + defer l.wLock.Unlock() + + return l.flushAll() +} + +func (l *Ledis) flushAll() error { + it := l.ldb.NewIterator() + defer it.Close() + + it.SeekToFirst() + + w := 
l.ldb.NewWriteBatch() + defer w.Rollback() + + n := 0 + for ; it.Valid(); it.Next() { + n++ + if n == 10000 { + if err := w.Commit(); err != nil { + log.Fatalf("flush all commit error: %s", err.Error()) + return err + } + n = 0 + } + w.Delete(it.RawKey()) + } + + if err := w.Commit(); err != nil { + log.Fatalf("flush all commit error: %s", err.Error()) + return err + } + + if l.r != nil { + if err := l.r.Clear(); err != nil { + log.Fatalf("flush all replication clear error: %s", err.Error()) + return err + } + } + + return nil +} + +func (l *Ledis) IsReadOnly() bool { + if l.cfg.GetReadonly() { + return true + } else if l.r != nil { + if b, _ := l.r.CommitIDBehind(); b { + return true + } + } + return false +} + +func (l *Ledis) checkTTL() { + l.ttlCheckers = make([]*ttlChecker, 0, 16) + l.ttlCheckerCh = make(chan *ttlChecker, 16) + + if l.cfg.TTLCheckInterval == 0 { + l.cfg.TTLCheckInterval = 1 + } + + l.wg.Add(1) + go func() { + defer l.wg.Done() + + tick := time.NewTicker(time.Duration(l.cfg.TTLCheckInterval) * time.Second) + defer tick.Stop() + + for { + select { + case <-tick.C: + if l.IsReadOnly() { + break + } + + for _, c := range l.ttlCheckers { + c.check() + } + case c := <-l.ttlCheckerCh: + l.ttlCheckers = append(l.ttlCheckers, c) + c.check() + case <-l.quit: + return + } + } + + }() + +} + +func (l *Ledis) StoreStat() *store.Stat { + return l.ldb.Stat() +} + +func (l *Ledis) CompactStore() error { + l.wLock.Lock() + defer l.wLock.Unlock() + + return l.ldb.Compact() +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go b/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go new file mode 100644 index 000000000000..7b3ff0f7d3f4 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go @@ -0,0 +1,204 @@ +package ledis + +import ( + "bytes" + "encoding/binary" + "fmt" + "sync" + + "github.com/siddontang/ledisdb/store" +) + +type ibucket interface { + Get(key []byte) ([]byte, error) + GetSlice(key []byte) (store.Slice, error) + + Put(key []byte, value []byte) error + Delete(key []byte) error + + NewIterator() *store.Iterator + + NewWriteBatch() *store.WriteBatch + + RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator + RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator + RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator + RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator +} + +type DB struct { + l *Ledis + + sdb *store.DB + + bucket ibucket + + index int + + // buffer to store index varint + indexVarBuf []byte + + kvBatch *batch + listBatch *batch + hashBatch *batch + zsetBatch *batch + // binBatch *batch + setBatch *batch + + // status uint8 + + ttlChecker *ttlChecker + + lbkeys *lBlockKeys +} + +func (l *Ledis) newDB(index int) *DB { + d := new(DB) + + d.l = l + + d.sdb = l.ldb + + d.bucket = d.sdb + + // d.status = DBAutoCommit + d.setIndex(index) + + d.kvBatch = d.newBatch() + d.listBatch = d.newBatch() + d.hashBatch = d.newBatch() + d.zsetBatch = d.newBatch() + // d.binBatch = d.newBatch() + d.setBatch = d.newBatch() + + d.lbkeys = newLBlockKeys() + + d.ttlChecker = d.newTTLChecker() + + return d +} + +func decodeDBIndex(buf []byte) (int, int, error) { + index, n := binary.Uvarint(buf) + if n == 0 { + return 0, 0, fmt.Errorf("buf is too small to save index") + } else if n < 0 { + return 0, 0, fmt.Errorf("value larger than 64 bits") + } else if index > 
uint64(MaxDatabases) { + return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases) + } + return int(index), n, nil +} + +func (db *DB) setIndex(index int) { + db.index = index + // the most size for varint is 10 bytes + buf := make([]byte, 10) + n := binary.PutUvarint(buf, uint64(index)) + + db.indexVarBuf = buf[0:n] +} + +func (db *DB) checkKeyIndex(buf []byte) (int, error) { + if len(buf) < len(db.indexVarBuf) { + return 0, fmt.Errorf("key is too small") + } else if !bytes.Equal(db.indexVarBuf, buf[0:len(db.indexVarBuf)]) { + return 0, fmt.Errorf("invalid db index") + } + + return len(db.indexVarBuf), nil +} + +func (db *DB) newTTLChecker() *ttlChecker { + c := new(ttlChecker) + c.db = db + c.txs = make([]*batch, maxDataType) + c.cbs = make([]onExpired, maxDataType) + c.nc = 0 + + c.register(KVType, db.kvBatch, db.delete) + c.register(ListType, db.listBatch, db.lDelete) + c.register(HashType, db.hashBatch, db.hDelete) + c.register(ZSetType, db.zsetBatch, db.zDelete) + // c.register(BitType, db.binBatch, db.bDelete) + c.register(SetType, db.setBatch, db.sDelete) + + return c +} + +func (db *DB) newBatch() *batch { + return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}) +} + +func (db *DB) Index() int { + return int(db.index) +} + +// func (db *DB) IsAutoCommit() bool { +// return db.status == DBAutoCommit +// } + +func (db *DB) FlushAll() (drop int64, err error) { + all := [...](func() (int64, error)){ + db.flush, + db.lFlush, + db.hFlush, + db.zFlush, + db.sFlush} + + for _, flush := range all { + if n, e := flush(); e != nil { + err = e + return + } else { + drop += n + } + } + + return +} + +func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { + var deleteFunc func(t *batch, key []byte) int64 + var metaDataType byte + switch dataType { + case KVType: + deleteFunc = db.delete + metaDataType = KVType + case ListType: + deleteFunc = db.lDelete + metaDataType = LMetaType + case HashType: + deleteFunc = db.hDelete + metaDataType = HSizeType + case ZSetType: + deleteFunc = db.zDelete + metaDataType = ZSizeType + // case BitType: + // deleteFunc = db.bDelete + // metaDataType = BitMetaType + case SetType: + deleteFunc = db.sDelete + metaDataType = SSizeType + default: + return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType]) + } + + var keys [][]byte + keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) + for len(keys) != 0 || err != nil { + for _, key := range keys { + deleteFunc(t, key) + db.rmExpire(t, dataType, key) + + } + + if err = t.Commit(); err != nil { + return + } else { + drop += int64(len(keys)) + } + keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) + } + return +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/migrate.go b/vendor/github.com/siddontang/ledisdb/ledis/migrate.go new file mode 100644 index 000000000000..aca8a86c5ed0 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/migrate.go @@ -0,0 +1,189 @@ +package ledis + +import ( + "fmt" + + "github.com/siddontang/rdb" +) + +/* + To support redis <-> ledisdb, the dump value format is the same as redis. + We will not support bitmap, and may add bit operations for kv later. + + But you must know that we use int64 for zset score, not double. + Only support rdb version 6. 
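+
+    A typical round trip, as a sketch (error handling elided; db, key and
+    dst are assumed to be an open *DB and two keys; the ttl argument is in
+    milliseconds, and 0 restores the key without an expiration):
+
+        data, _ := db.HDump(key)       // redis-compatible DUMP payload
+        _ = db.Restore(dst, 0, data)   // recreate it under the key dst
+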
+*/ + +func (db *DB) Dump(key []byte) ([]byte, error) { + v, err := db.Get(key) + if err != nil { + return nil, err + } else if v == nil { + return nil, err + } + + return rdb.Dump(rdb.String(v)) +} + +func (db *DB) LDump(key []byte) ([]byte, error) { + v, err := db.LRange(key, 0, -1) + if err != nil { + return nil, err + } else if len(v) == 0 { + return nil, err + } + + return rdb.Dump(rdb.List(v)) +} + +func (db *DB) HDump(key []byte) ([]byte, error) { + v, err := db.HGetAll(key) + if err != nil { + return nil, err + } else if len(v) == 0 { + return nil, err + } + + o := make(rdb.Hash, len(v)) + for i := 0; i < len(v); i++ { + o[i].Field = v[i].Field + o[i].Value = v[i].Value + } + + return rdb.Dump(o) +} + +func (db *DB) SDump(key []byte) ([]byte, error) { + v, err := db.SMembers(key) + if err != nil { + return nil, err + } else if len(v) == 0 { + return nil, err + } + + return rdb.Dump(rdb.Set(v)) +} + +func (db *DB) ZDump(key []byte) ([]byte, error) { + v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1) + if err != nil { + return nil, err + } else if len(v) == 0 { + return nil, err + } + + o := make(rdb.ZSet, len(v)) + for i := 0; i < len(v); i++ { + o[i].Member = v[i].Member + o[i].Score = float64(v[i].Score) + } + + return rdb.Dump(o) +} + +func (db *DB) Restore(key []byte, ttl int64, data []byte) error { + d, err := rdb.DecodeDump(data) + if err != nil { + return err + } + + //ttl is milliseconds, but we only support seconds + //later may support milliseconds + if ttl > 0 { + ttl = ttl / 1e3 + if ttl == 0 { + ttl = 1 + } + } + + switch value := d.(type) { + case rdb.String: + if _, err = db.Del(key); err != nil { + return err + } + + if err = db.Set(key, value); err != nil { + return err + } + + if ttl > 0 { + if _, err = db.Expire(key, ttl); err != nil { + return err + } + } + case rdb.Hash: + //first clear old key + if _, err = db.HClear(key); err != nil { + return err + } + + fv := make([]FVPair, len(value)) + for i := 0; i < len(value); i++ { + fv[i] = FVPair{Field: value[i].Field, Value: value[i].Value} + } + + if err = db.HMset(key, fv...); err != nil { + return err + } + + if ttl > 0 { + if _, err = db.HExpire(key, ttl); err != nil { + return err + } + } + case rdb.List: + //first clear old key + if _, err = db.LClear(key); err != nil { + return err + } + + if _, err = db.RPush(key, value...); err != nil { + return err + } + + if ttl > 0 { + if _, err = db.LExpire(key, ttl); err != nil { + return err + } + } + case rdb.ZSet: + //first clear old key + if _, err = db.ZClear(key); err != nil { + return err + } + + sp := make([]ScorePair, len(value)) + for i := 0; i < len(value); i++ { + sp[i] = ScorePair{int64(value[i].Score), value[i].Member} + } + + if _, err = db.ZAdd(key, sp...); err != nil { + return err + } + + if ttl > 0 { + if _, err = db.ZExpire(key, ttl); err != nil { + return err + } + } + case rdb.Set: + //first clear old key + if _, err = db.SClear(key); err != nil { + return err + } + + if _, err = db.SAdd(key, value...); err != nil { + return err + } + + if ttl > 0 { + if _, err = db.SExpire(key, ttl); err != nil { + return err + } + } + default: + return fmt.Errorf("invalid data type %T", d) + } + + return nil +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/replication.go b/vendor/github.com/siddontang/ledisdb/ledis/replication.go new file mode 100644 index 000000000000..20c20994b78a --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/replication.go @@ -0,0 +1,250 @@ +package ledis + +import ( + "bytes" + "errors" + "io" + "time" + 
+ "github.com/siddontang/go/log" + "github.com/siddontang/go/snappy" + "github.com/siddontang/ledisdb/rpl" + "github.com/siddontang/ledisdb/store" +) + +const ( + maxReplLogSize = 1 * 1024 * 1024 +) + +var ( + ErrLogMissed = errors.New("log is pured in server") +) + +func (l *Ledis) ReplicationUsed() bool { + return l.r != nil +} + +func (l *Ledis) handleReplication() error { + l.wLock.Lock() + defer l.wLock.Unlock() + + defer AsyncNotify(l.rDoneCh) + + rl := &rpl.Log{} + + var err error + for { + if err = l.r.NextNeedCommitLog(rl); err != nil { + if err != rpl.ErrNoBehindLog { + log.Errorf("get next commit log err, %s", err.Error) + return err + } else { + return nil + } + } else { + l.rbatch.Rollback() + + if rl.Compression == 1 { + //todo optimize + if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { + log.Errorf("decode log error %s", err.Error()) + return err + } + } + + if bd, err := store.NewBatchData(rl.Data); err != nil { + log.Errorf("decode batch log error %s", err.Error()) + return err + } else if err = bd.Replay(l.rbatch); err != nil { + log.Errorf("replay batch log error %s", err.Error()) + } + + l.commitLock.Lock() + if err = l.rbatch.Commit(); err != nil { + log.Errorf("commit log error %s", err.Error()) + } else if err = l.r.UpdateCommitID(rl.ID); err != nil { + log.Errorf("update commit id error %s", err.Error()) + } + + l.commitLock.Unlock() + if err != nil { + return err + } + } + + } +} + +func (l *Ledis) onReplication() { + defer l.wg.Done() + + l.noticeReplication() + + for { + select { + case <-l.rc: + l.handleReplication() + case <-l.quit: + return + } + } +} + +func (l *Ledis) WaitReplication() error { + if !l.ReplicationUsed() { + return ErrRplNotSupport + + } + + for i := 0; i < 100; i++ { + l.noticeReplication() + + select { + case <-l.rDoneCh: + case <-l.quit: + return nil + } + time.Sleep(100 * time.Millisecond) + + b, err := l.r.CommitIDBehind() + if err != nil { + return err + } else if !b { + return nil + } + } + + return errors.New("wait replication too many times") +} + +func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { + if !l.ReplicationUsed() { + return ErrRplNotSupport + } else if !l.cfg.Readonly { + return ErrRplInRDWR + } + + log := &rpl.Log{} + + for { + if err := log.Decode(rb); err != nil { + if err == io.EOF { + break + } else { + return err + } + } + + if err := l.r.StoreLog(log); err != nil { + return err + } + + } + + l.noticeReplication() + + return nil +} + +func (l *Ledis) noticeReplication() { + AsyncNotify(l.rc) +} + +func (l *Ledis) StoreLogsFromData(data []byte) error { + rb := bytes.NewReader(data) + + return l.StoreLogsFromReader(rb) +} + +func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) { + if !l.ReplicationUsed() { + // no replication log + nextLogID = 0 + err = ErrRplNotSupport + return + } + + var firtID, lastID uint64 + + firtID, err = l.r.FirstLogID() + if err != nil { + return + } + + if startLogID < firtID { + err = ErrLogMissed + return + } + + lastID, err = l.r.LastLogID() + if err != nil { + return + } + + nextLogID = startLogID + + log := &rpl.Log{} + for i := startLogID; i <= lastID; i++ { + if err = l.r.GetLog(i, log); err != nil { + return + } + + if err = log.Encode(w); err != nil { + return + } + + nextLogID = i + 1 + + n += log.Size() + + if n > maxReplLogSize { + break + } + } + + return +} + +// try to read events, if no events read, try to wait the new event singal until timeout seconds +func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w 
io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) { + n, nextLogID, err = l.ReadLogsTo(startLogID, w) + if err != nil { + return + } else if n != 0 { + return + } + //no events read + select { + case <-l.r.WaitLog(): + case <-time.After(time.Duration(timeout) * time.Second): + case <-quitCh: + return + } + return l.ReadLogsTo(startLogID, w) +} + +func (l *Ledis) propagate(rl *rpl.Log) { + for _, h := range l.rhs { + h(rl) + } +} + +type NewLogEventHandler func(rl *rpl.Log) + +func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { + if !l.ReplicationUsed() { + return ErrRplNotSupport + } + + l.rhs = append(l.rhs, h) + + return nil +} + +func (l *Ledis) ReplicationStat() (*rpl.Stat, error) { + if !l.ReplicationUsed() { + return nil, ErrRplNotSupport + } + + return l.r.Stat() +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/scan.go b/vendor/github.com/siddontang/ledisdb/ledis/scan.go new file mode 100644 index 000000000000..c4540a6105ed --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/scan.go @@ -0,0 +1,396 @@ +package ledis + +import ( + "errors" + "regexp" + + "github.com/siddontang/ledisdb/store" +) + +var errDataType = errors.New("error data type") +var errMetaKey = errors.New("error meta key") + +//fif inclusive is true, scan range [cursor, inf) else (cursor, inf) +func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + storeDataType, err := getDataStoreType(dataType) + if err != nil { + return nil, err + } + + return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false) +} + +//if inclusive is true, revscan range (-inf, cursor] else (inf, cursor) +func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + storeDataType, err := getDataStoreType(dataType) + if err != nil { + return nil, err + } + + return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true) +} + +func getDataStoreType(dataType DataType) (byte, error) { + var storeDataType byte + switch dataType { + case KV: + storeDataType = KVType + case LIST: + storeDataType = LMetaType + case HASH: + storeDataType = HSizeType + case SET: + storeDataType = SSizeType + case ZSET: + storeDataType = ZSizeType + default: + return 0, errDataType + } + return storeDataType, nil +} + +func buildMatchRegexp(match string) (*regexp.Regexp, error) { + var err error + var r *regexp.Regexp = nil + + if len(match) > 0 { + if r, err = regexp.Compile(match); err != nil { + return nil, err + } + } + + return r, nil +} + +func (db *DB) buildScanIterator(minKey []byte, maxKey []byte, inclusive bool, reverse bool) *store.RangeLimitIterator { + tp := store.RangeOpen + + if !reverse { + if inclusive { + tp = store.RangeROpen + } + } else { + if inclusive { + tp = store.RangeLOpen + } + } + + var it *store.RangeLimitIterator + if !reverse { + it = db.bucket.RangeIterator(minKey, maxKey, tp) + } else { + it = db.bucket.RevRangeIterator(minKey, maxKey, tp) + } + + return it +} + +func (db *DB) buildScanKeyRange(storeDataType byte, key []byte, reverse bool) (minKey []byte, maxKey []byte, err error) { + if !reverse { + if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil { + return + } + if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil { + return + } + } else { + if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil { + return + } + if maxKey, err = db.encodeScanMaxKey(storeDataType, key); err 
!= nil { + return + } + } + return +} + +func checkScanCount(count int) int { + if count <= 0 { + count = defaultScanCount + } + + return count +} + +func (db *DB) scanGeneric(storeDataType byte, key []byte, count int, + inclusive bool, match string, reverse bool) ([][]byte, error) { + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + minKey, maxKey, err := db.buildScanKeyRange(storeDataType, key, reverse) + if err != nil { + return nil, err + } + + count = checkScanCount(count) + + it := db.buildScanIterator(minKey, maxKey, inclusive, reverse) + + v := make([][]byte, 0, count) + + for i := 0; it.Valid() && i < count; it.Next() { + if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil { + continue + } else if r != nil && !r.Match(k) { + continue + } else { + v = append(v, k) + i++ + } + } + it.Close() + return v, nil +} + +func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) { + return db.encodeScanKey(storeDataType, key) +} + +func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) { + if len(key) > 0 { + return db.encodeScanKey(storeDataType, key) + } + + k, err := db.encodeScanKey(storeDataType, nil) + if err != nil { + return nil, err + } + k[len(k)-1] = storeDataType + 1 + return k, nil +} + +func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) { + switch storeDataType { + case KVType: + return db.encodeKVKey(key), nil + case LMetaType: + return db.lEncodeMetaKey(key), nil + case HSizeType: + return db.hEncodeSizeKey(key), nil + case ZSizeType: + return db.zEncodeSizeKey(key), nil + case SSizeType: + return db.sEncodeSizeKey(key), nil + default: + return nil, errDataType + } +} + +func (db *DB) decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) { + switch storeDataType { + case KVType: + key, err = db.decodeKVKey(ek) + case LMetaType: + key, err = db.lDecodeMetaKey(ek) + case HSizeType: + key, err = db.hDecodeSizeKey(ek) + case ZSizeType: + key, err = db.zDecodeSizeKey(ek) + case SSizeType: + key, err = db.sDecodeSizeKey(ek) + default: + err = errDataType + } + return +} + +// for specail data scan + +func (db *DB) buildDataScanKeyRange(storeDataType byte, key []byte, cursor []byte, reverse bool) (minKey []byte, maxKey []byte, err error) { + if !reverse { + if minKey, err = db.encodeDataScanMinKey(storeDataType, key, cursor); err != nil { + return + } + if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, nil); err != nil { + return + } + } else { + if minKey, err = db.encodeDataScanMinKey(storeDataType, key, nil); err != nil { + return + } + if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, cursor); err != nil { + return + } + } + return +} + +func (db *DB) encodeDataScanMinKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { + return db.encodeDataScanKey(storeDataType, key, cursor) +} + +func (db *DB) encodeDataScanMaxKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { + if len(cursor) > 0 { + return db.encodeDataScanKey(storeDataType, key, cursor) + } + + k, err := db.encodeDataScanKey(storeDataType, key, nil) + if err != nil { + return nil, err + } + + // here, the last byte is the start seperator, set it to stop seperator + k[len(k)-1] = k[len(k)-1] + 1 + return k, nil +} + +func (db *DB) encodeDataScanKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { + switch storeDataType { + case HashType: + return db.hEncodeHashKey(key, cursor), nil + case ZSetType: + return 
db.zEncodeSetKey(key, cursor), nil + case SetType: + return db.sEncodeSetKey(key, cursor), nil + default: + return nil, errDataType + } +} + +func (db *DB) buildDataScanIterator(storeDataType byte, key []byte, cursor []byte, count int, + inclusive bool, reverse bool) (*store.RangeLimitIterator, error) { + + if err := checkKeySize(key); err != nil { + return nil, err + } + + minKey, maxKey, err := db.buildDataScanKeyRange(storeDataType, key, cursor, reverse) + if err != nil { + return nil, err + } + + it := db.buildScanIterator(minKey, maxKey, inclusive, reverse) + + return it, nil +} + +func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]FVPair, error) { + count = checkScanCount(count) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + v := make([]FVPair, 0, count) + + it, err := db.buildDataScanIterator(HashType, key, cursor, count, inclusive, reverse) + if err != nil { + return nil, err + } + + defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, f, err := db.hDecodeHashKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(f) { + continue + } + + v = append(v, FVPair{Field: f, Value: it.Value()}) + + i++ + } + + return v, nil +} + +func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { + return db.hScanGeneric(key, cursor, count, inclusive, match, false) +} + +func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { + return db.hScanGeneric(key, cursor, count, inclusive, match, true) +} + +func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([][]byte, error) { + count = checkScanCount(count) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + v := make([][]byte, 0, count) + + it, err := db.buildDataScanIterator(SetType, key, cursor, count, inclusive, reverse) + if err != nil { + return nil, err + } + + defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, m, err := db.sDecodeSetKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(m) { + continue + } + + v = append(v, m) + + i++ + } + + return v, nil +} + +func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + return db.sScanGeneric(key, cursor, count, inclusive, match, false) +} + +func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { + return db.sScanGeneric(key, cursor, count, inclusive, match, true) +} + +func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]ScorePair, error) { + count = checkScanCount(count) + + r, err := buildMatchRegexp(match) + if err != nil { + return nil, err + } + + v := make([]ScorePair, 0, count) + + it, err := db.buildDataScanIterator(ZSetType, key, cursor, count, inclusive, reverse) + if err != nil { + return nil, err + } + + defer it.Close() + + for i := 0; it.Valid() && i < count; it.Next() { + _, m, err := db.zDecodeSetKey(it.Key()) + if err != nil { + return nil, err + } else if r != nil && !r.Match(m) { + continue + } + + score, err := Int64(it.Value(), nil) + if err != nil { + return nil, err + } + + v = append(v, ScorePair{Score: score, Member: m}) + + i++ + } + + return v, nil +} + +func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive 
bool, match string) ([]ScorePair, error) { + return db.zScanGeneric(key, cursor, count, inclusive, match, false) +} + +func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) { + return db.zScanGeneric(key, cursor, count, inclusive, match, true) +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/sort.go b/vendor/github.com/siddontang/ledisdb/ledis/sort.go new file mode 100644 index 000000000000..6a54c075b822 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/sort.go @@ -0,0 +1,233 @@ +package ledis + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "github.com/siddontang/ledisdb/store" +) + +type Limit struct { + Offset int + Size int +} + +func getSortRange(values [][]byte, offset int, size int) (int, int) { + var start = 0 + if offset > 0 { + start = offset + } + + valueLen := len(values) + var end = valueLen - 1 + if size > 0 { + end = start + size - 1 + } + + if start >= valueLen { + start = valueLen - 1 + end = valueLen - 2 + } + + if end >= valueLen { + end = valueLen - 1 + } + + return start, end +} + +var hashPattern = []byte("*->") + +func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte { + // If the pattern is #, return the substitution key itself + if bytes.Equal(pattern, []byte{'#'}) { + return subKey + } + + // If we can't find '*' in the pattern, return nil + if !bytes.Contains(pattern, []byte{'*'}) { + return nil + } + + key := pattern + var field []byte = nil + + // Find out if we're dealing with a hash dereference + if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) { + key = pattern[0 : n+1] + field = pattern[n+3:] + } + + // Perform the '*' substitution + key = bytes.Replace(key, []byte{'*'}, subKey, 1) + + var value []byte + if field == nil { + value, _ = db.Get(key) + } else { + value, _ = db.HGet(key, field) + } + + return value +} + +type sortItem struct { + value []byte + cmpValue []byte + score float64 +} + +type sortItemSlice struct { + alpha bool + sortByPattern bool + items []sortItem +} + +func (s *sortItemSlice) Len() int { + return len(s.items) +} + +func (s *sortItemSlice) Swap(i, j int) { + s.items[i], s.items[j] = s.items[j], s.items[i] +} + +func (s *sortItemSlice) Less(i, j int) bool { + s1 := s.items[i] + s2 := s.items[j] + if !s.alpha { + if s1.score < s2.score { + return true + } else if s1.score > s2.score { + return false + } else { + return bytes.Compare(s1.value, s2.value) < 0 + } + } else { + if s.sortByPattern { + if s1.cmpValue == nil || s2.cmpValue == nil { + if s1.cmpValue == nil { + return true + } else { + return false + } + } else { + // Unlike redis, we only use bytes compare + return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0 + } + } else { + // Unlike redis, we only use bytes compare + return bytes.Compare(s1.value, s2.value) < 0 + } + } +} + +func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { + if len(values) == 0 { + return [][]byte{}, nil + } + + start, end := getSortRange(values, offset, size) + + dontsort := 0 + + if sortBy != nil { + if !bytes.Contains(sortBy, []byte{'*'}) { + dontsort = 1 + } + } + + items := &sortItemSlice{ + alpha: alpha, + sortByPattern: sortBy != nil, + items: make([]sortItem, len(values)), + } + + for i, value := range values { + items.items[i].value = value + items.items[i].score = 0 + items.items[i].cmpValue = nil + + if dontsort == 0 { + var cmpValue []byte + if sortBy != nil { + cmpValue = 
db.lookupKeyByPattern(sortBy, value) + } else { + // use value iteself to sort by + cmpValue = value + } + + if cmpValue == nil { + continue + } + + if alpha { + if sortBy != nil { + items.items[i].cmpValue = cmpValue + } + } else { + score, err := strconv.ParseFloat(string(cmpValue), 64) + if err != nil { + return nil, fmt.Errorf("%s scores can't be converted into double", cmpValue) + } + items.items[i].score = score + } + } + } + + if dontsort == 0 { + if !desc { + sort.Sort(items) + } else { + sort.Sort(sort.Reverse(items)) + } + } + + var resLen int = end - start + 1 + if len(sortGet) > 0 { + resLen = len(sortGet) * (end - start + 1) + } + + res := make([][]byte, 0, resLen) + for i := start; i <= end; i++ { + if len(sortGet) == 0 { + res = append(res, items.items[i].value) + } else { + for _, getPattern := range sortGet { + v := db.lookupKeyByPattern(getPattern, items.items[i].value) + res = append(res, v) + } + } + } + + return res, nil +} + +func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { + values, err := db.LRange(key, 0, -1) + + if err != nil { + return nil, err + } + + return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) +} + +func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { + values, err := db.SMembers(key) + if err != nil { + return nil, err + } + + return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) +} + +func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { + values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1) + if err != nil { + return nil, err + } + + return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go b/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go new file mode 100644 index 000000000000..c822e232da63 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go @@ -0,0 +1,537 @@ +package ledis + +import ( + "encoding/binary" + "errors" + "time" + + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/store" +) + +type FVPair struct { + Field []byte + Value []byte +} + +var errHashKey = errors.New("invalid hash key") +var errHSizeKey = errors.New("invalid hsize key") + +const ( + hashStartSep byte = ':' + hashStopSep byte = hashStartSep + 1 +) + +func checkHashKFSize(key []byte, field []byte) error { + if len(key) > MaxKeySize || len(key) == 0 { + return errKeySize + } else if len(field) > MaxHashFieldSize || len(field) == 0 { + return errHashFieldSize + } + return nil +} + +func (db *DB) hEncodeSizeKey(key []byte) []byte { + buf := make([]byte, len(key)+1+len(db.indexVarBuf)) + + pos := 0 + n := copy(buf, db.indexVarBuf) + + pos += n + buf[pos] = HSizeType + + pos++ + copy(buf[pos:], key) + + return buf +} + +func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, err + } + + if pos+1 > len(ek) || ek[pos] != HSizeType { + return nil, errHSizeKey + } + pos++ + + return ek[pos:], nil +} + +func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte { + buf := make([]byte, len(key)+len(field)+1+1+2+len(db.indexVarBuf)) + + pos := 0 + n := copy(buf, db.indexVarBuf) + pos += n + + buf[pos] = HashType + pos++ + + binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) + pos += 2 + + copy(buf[pos:], key) + pos += 
len(key) + + buf[pos] = hashStartSep + pos++ + copy(buf[pos:], field) + + return buf +} + +func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, nil, err + } + + if pos+1 > len(ek) || ek[pos] != HashType { + return nil, nil, errHashKey + } + pos++ + + if pos+2 > len(ek) { + return nil, nil, errHashKey + } + + keyLen := int(binary.BigEndian.Uint16(ek[pos:])) + pos += 2 + + if keyLen+pos > len(ek) { + return nil, nil, errHashKey + } + + key := ek[pos : pos+keyLen] + pos += keyLen + + if ek[pos] != hashStartSep { + return nil, nil, errHashKey + } + + pos++ + field := ek[pos:] + return key, field, nil +} + +func (db *DB) hEncodeStartKey(key []byte) []byte { + return db.hEncodeHashKey(key, nil) +} + +func (db *DB) hEncodeStopKey(key []byte) []byte { + k := db.hEncodeHashKey(key, nil) + + k[len(k)-1] = hashStopSep + + return k +} + +func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) { + t := db.hashBatch + + ek := db.hEncodeHashKey(key, field) + + var n int64 = 1 + if v, _ := db.bucket.Get(ek); v != nil { + n = 0 + } else { + if _, err := db.hIncrSize(key, 1); err != nil { + return 0, err + } + } + + t.Put(ek, value) + return n, nil +} + +// ps : here just focus on deleting the hash data, +// any other likes expire is ignore. +func (db *DB) hDelete(t *batch, key []byte) int64 { + sk := db.hEncodeSizeKey(key) + start := db.hEncodeStartKey(key) + stop := db.hEncodeStopKey(key) + + var num int64 = 0 + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + for ; it.Valid(); it.Next() { + t.Delete(it.Key()) + num++ + } + it.Close() + + t.Delete(sk) + return num +} + +func (db *DB) hExpireAt(key []byte, when int64) (int64, error) { + t := db.hashBatch + t.Lock() + defer t.Unlock() + + if hlen, err := db.HLen(key); err != nil || hlen == 0 { + return 0, err + } else { + db.expireAt(t, HashType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + } + return 1, nil +} + +func (db *DB) HLen(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + return Int64(db.bucket.Get(db.hEncodeSizeKey(key))) +} + +func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { + if err := checkHashKFSize(key, field); err != nil { + return 0, err + } else if err := checkValueSize(value); err != nil { + return 0, err + } + + t := db.hashBatch + t.Lock() + defer t.Unlock() + + n, err := db.hSetItem(key, field, value) + if err != nil { + return 0, err + } + + err = t.Commit() + return n, err +} + +func (db *DB) HGet(key []byte, field []byte) ([]byte, error) { + if err := checkHashKFSize(key, field); err != nil { + return nil, err + } + + return db.bucket.Get(db.hEncodeHashKey(key, field)) +} + +func (db *DB) HMset(key []byte, args ...FVPair) error { + t := db.hashBatch + t.Lock() + defer t.Unlock() + + var err error + var ek []byte + var num int64 = 0 + for i := 0; i < len(args); i++ { + if err := checkHashKFSize(key, args[i].Field); err != nil { + return err + } else if err := checkValueSize(args[i].Value); err != nil { + return err + } + + ek = db.hEncodeHashKey(key, args[i].Field) + + if v, err := db.bucket.Get(ek); err != nil { + return err + } else if v == nil { + num++ + } + + t.Put(ek, args[i].Value) + } + + if _, err = db.hIncrSize(key, num); err != nil { + return err + } + + //todo add binglog + err = t.Commit() + return err +} + +func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) { + var ek []byte + + it 
:= db.bucket.NewIterator() + defer it.Close() + + r := make([][]byte, len(args)) + for i := 0; i < len(args); i++ { + if err := checkHashKFSize(key, args[i]); err != nil { + return nil, err + } + + ek = db.hEncodeHashKey(key, args[i]) + + r[i] = it.Find(ek) + } + + return r, nil +} + +func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) { + t := db.hashBatch + + var ek []byte + var v []byte + var err error + + t.Lock() + defer t.Unlock() + + it := db.bucket.NewIterator() + defer it.Close() + + var num int64 = 0 + for i := 0; i < len(args); i++ { + if err := checkHashKFSize(key, args[i]); err != nil { + return 0, err + } + + ek = db.hEncodeHashKey(key, args[i]) + + v = it.RawFind(ek) + if v == nil { + continue + } else { + num++ + t.Delete(ek) + } + } + + if _, err = db.hIncrSize(key, -num); err != nil { + return 0, err + } + + err = t.Commit() + + return num, err +} + +func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) { + t := db.hashBatch + sk := db.hEncodeSizeKey(key) + + var err error + var size int64 = 0 + if size, err = Int64(db.bucket.Get(sk)); err != nil { + return 0, err + } else { + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, HashType, key) + } else { + t.Put(sk, PutInt64(size)) + } + } + + return size, nil +} + +func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { + if err := checkHashKFSize(key, field); err != nil { + return 0, err + } + + t := db.hashBatch + var ek []byte + var err error + + t.Lock() + defer t.Unlock() + + ek = db.hEncodeHashKey(key, field) + + var n int64 = 0 + if n, err = StrInt64(db.bucket.Get(ek)); err != nil { + return 0, err + } + + n += delta + + _, err = db.hSetItem(key, field, num.FormatInt64ToSlice(n)) + if err != nil { + return 0, err + } + + err = t.Commit() + + return n, err +} + +func (db *DB) HGetAll(key []byte) ([]FVPair, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.hEncodeStartKey(key) + stop := db.hEncodeStopKey(key) + + v := make([]FVPair, 0, 16) + + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + defer it.Close() + + for ; it.Valid(); it.Next() { + _, f, err := db.hDecodeHashKey(it.Key()) + if err != nil { + return nil, err + } + + v = append(v, FVPair{Field: f, Value: it.Value()}) + } + + return v, nil +} + +func (db *DB) HKeys(key []byte) ([][]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.hEncodeStartKey(key) + stop := db.hEncodeStopKey(key) + + v := make([][]byte, 0, 16) + + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + defer it.Close() + + for ; it.Valid(); it.Next() { + _, f, err := db.hDecodeHashKey(it.Key()) + if err != nil { + return nil, err + } + v = append(v, f) + } + + return v, nil +} + +func (db *DB) HValues(key []byte) ([][]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.hEncodeStartKey(key) + stop := db.hEncodeStopKey(key) + + v := make([][]byte, 0, 16) + + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + defer it.Close() + + for ; it.Valid(); it.Next() { + _, _, err := db.hDecodeHashKey(it.Key()) + if err != nil { + return nil, err + } + + v = append(v, it.Value()) + } + + return v, nil +} + +func (db *DB) HClear(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.hashBatch + t.Lock() + defer t.Unlock() + + num := db.hDelete(t, key) + db.rmExpire(t, HashType, key) + + err 
:= t.Commit() + return num, err +} + +func (db *DB) HMclear(keys ...[]byte) (int64, error) { + t := db.hashBatch + t.Lock() + defer t.Unlock() + + for _, key := range keys { + if err := checkKeySize(key); err != nil { + return 0, err + } + + db.hDelete(t, key) + db.rmExpire(t, HashType, key) + } + + err := t.Commit() + return int64(len(keys)), err +} + +func (db *DB) hFlush() (drop int64, err error) { + t := db.hashBatch + + t.Lock() + defer t.Unlock() + + return db.flushType(t, HashType) +} + +func (db *DB) HExpire(key []byte, duration int64) (int64, error) { + if duration <= 0 { + return 0, errExpireValue + } + + return db.hExpireAt(key, time.Now().Unix()+duration) +} + +func (db *DB) HExpireAt(key []byte, when int64) (int64, error) { + if when <= time.Now().Unix() { + return 0, errExpireValue + } + + return db.hExpireAt(key, when) +} + +func (db *DB) HTTL(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return -1, err + } + + return db.ttl(HashType, key) +} + +func (db *DB) HPersist(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.hashBatch + t.Lock() + defer t.Unlock() + + n, err := db.rmExpire(t, HashType, key) + if err != nil { + return 0, err + } + + err = t.Commit() + return n, err +} + +func (db *DB) HKeyExists(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + sk := db.hEncodeSizeKey(key) + v, err := db.bucket.Get(sk) + if v != nil && err == nil { + return 1, nil + } + return 0, err +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go b/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go new file mode 100644 index 000000000000..624287fb89fa --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go @@ -0,0 +1,769 @@ +package ledis + +import ( + "encoding/binary" + "errors" + "fmt" + "strings" + "time" + + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/store" +) + +type KVPair struct { + Key []byte + Value []byte +} + +var errKVKey = errors.New("invalid encode kv key") + +func checkKeySize(key []byte) error { + if len(key) > MaxKeySize || len(key) == 0 { + return errKeySize + } + return nil +} + +func checkValueSize(value []byte) error { + if len(value) > MaxValueSize { + return errValueSize + } + + return nil +} + +func (db *DB) encodeKVKey(key []byte) []byte { + ek := make([]byte, len(key)+1+len(db.indexVarBuf)) + pos := copy(ek, db.indexVarBuf) + ek[pos] = KVType + pos++ + copy(ek[pos:], key) + return ek +} + +func (db *DB) decodeKVKey(ek []byte) ([]byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, err + } + if pos+1 > len(ek) || ek[pos] != KVType { + return nil, errKVKey + } + + pos++ + + return ek[pos:], nil +} + +func (db *DB) encodeKVMinKey() []byte { + ek := db.encodeKVKey(nil) + return ek +} + +func (db *DB) encodeKVMaxKey() []byte { + ek := db.encodeKVKey(nil) + ek[len(ek)-1] = KVType + 1 + return ek +} + +func (db *DB) incr(key []byte, delta int64) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + var err error + key = db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + var n int64 + n, err = StrInt64(db.bucket.Get(key)) + if err != nil { + return 0, err + } + + n += delta + + t.Put(key, num.FormatInt64ToSlice(n)) + + err = t.Commit() + return n, err +} + +// ps : here just focus on deleting the key-value data, +// any other likes expire is ignore. 
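+// delete stages the key's removal in the caller's batch; the caller
+// is responsible for committing it.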
+func (db *DB) delete(t *batch, key []byte) int64 { + key = db.encodeKVKey(key) + t.Delete(key) + return 1 +} + +func (db *DB) setExpireAt(key []byte, when int64) (int64, error) { + t := db.kvBatch + t.Lock() + defer t.Unlock() + + if exist, err := db.Exists(key); err != nil || exist == 0 { + return 0, err + } else { + db.expireAt(t, KVType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + } + return 1, nil +} + +func (db *DB) Decr(key []byte) (int64, error) { + return db.incr(key, -1) +} + +func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) { + return db.incr(key, -decrement) +} + +func (db *DB) Del(keys ...[]byte) (int64, error) { + if len(keys) == 0 { + return 0, nil + } + + codedKeys := make([][]byte, len(keys)) + for i, k := range keys { + codedKeys[i] = db.encodeKVKey(k) + } + + t := db.kvBatch + t.Lock() + defer t.Unlock() + + for i, k := range keys { + t.Delete(codedKeys[i]) + db.rmExpire(t, KVType, k) + } + + err := t.Commit() + return int64(len(keys)), err +} + +func (db *DB) Exists(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + var err error + key = db.encodeKVKey(key) + + var v []byte + v, err = db.bucket.Get(key) + if v != nil && err == nil { + return 1, nil + } + + return 0, err +} + +func (db *DB) Get(key []byte) ([]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + key = db.encodeKVKey(key) + + return db.bucket.Get(key) +} + +func (db *DB) GetSlice(key []byte) (store.Slice, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + key = db.encodeKVKey(key) + + return db.bucket.GetSlice(key) +} + +func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } else if err := checkValueSize(value); err != nil { + return nil, err + } + + key = db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + oldValue, err := db.bucket.Get(key) + if err != nil { + return nil, err + } + + t.Put(key, value) + + err = t.Commit() + + return oldValue, err +} + +func (db *DB) Incr(key []byte) (int64, error) { + return db.incr(key, 1) +} + +func (db *DB) IncrBy(key []byte, increment int64) (int64, error) { + return db.incr(key, increment) +} + +func (db *DB) MGet(keys ...[]byte) ([][]byte, error) { + values := make([][]byte, len(keys)) + + it := db.bucket.NewIterator() + defer it.Close() + + for i := range keys { + if err := checkKeySize(keys[i]); err != nil { + return nil, err + } + + values[i] = it.Find(db.encodeKVKey(keys[i])) + } + + return values, nil +} + +func (db *DB) MSet(args ...KVPair) error { + if len(args) == 0 { + return nil + } + + t := db.kvBatch + + var err error + var key []byte + var value []byte + + t.Lock() + defer t.Unlock() + + for i := 0; i < len(args); i++ { + if err := checkKeySize(args[i].Key); err != nil { + return err + } else if err := checkValueSize(args[i].Value); err != nil { + return err + } + + key = db.encodeKVKey(args[i].Key) + + value = args[i].Value + + t.Put(key, value) + + } + + err = t.Commit() + return err +} + +func (db *DB) Set(key []byte, value []byte) error { + if err := checkKeySize(key); err != nil { + return err + } else if err := checkValueSize(value); err != nil { + return err + } + + var err error + key = db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + t.Put(key, value) + + err = t.Commit() + + return err +} + +func (db *DB) SetNX(key []byte, value []byte) (int64, error) { + if err := 
checkKeySize(key); err != nil { + return 0, err + } else if err := checkValueSize(value); err != nil { + return 0, err + } + + var err error + key = db.encodeKVKey(key) + + var n int64 = 1 + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + if v, err := db.bucket.Get(key); err != nil { + return 0, err + } else if v != nil { + n = 0 + } else { + t.Put(key, value) + + err = t.Commit() + } + + return n, err +} + +func (db *DB) SetEX(key []byte, duration int64, value []byte) error { + if err := checkKeySize(key); err != nil { + return err + } else if err := checkValueSize(value); err != nil { + return err + } else if duration <= 0 { + return errExpireValue + } + + ek := db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + t.Put(ek, value) + db.expireAt(t, KVType, key, time.Now().Unix()+duration) + + if err := t.Commit(); err != nil { + return err + } + + return nil +} + +func (db *DB) flush() (drop int64, err error) { + t := db.kvBatch + t.Lock() + defer t.Unlock() + return db.flushType(t, KVType) +} + +func (db *DB) Expire(key []byte, duration int64) (int64, error) { + if duration <= 0 { + return 0, errExpireValue + } + + return db.setExpireAt(key, time.Now().Unix()+duration) +} + +func (db *DB) ExpireAt(key []byte, when int64) (int64, error) { + if when <= time.Now().Unix() { + return 0, errExpireValue + } + + return db.setExpireAt(key, when) +} + +func (db *DB) TTL(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return -1, err + } + + return db.ttl(KVType, key) +} + +func (db *DB) Persist(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.kvBatch + t.Lock() + defer t.Unlock() + n, err := db.rmExpire(t, KVType, key) + if err != nil { + return 0, err + } + + err = t.Commit() + return n, err +} + +func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) { + if len(value) == 0 { + return 0, nil + } + + if err := checkKeySize(key); err != nil { + return 0, err + } else if len(value)+offset > MaxValueSize { + return 0, errValueSize + } + + key = db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + oldValue, err := db.bucket.Get(key) + if err != nil { + return 0, err + } + + extra := offset + len(value) - len(oldValue) + if extra > 0 { + oldValue = append(oldValue, make([]byte, extra)...) 
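+		// grow oldValue with zero bytes so it covers offset+len(value)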
+ } + + copy(oldValue[offset:], value) + + t.Put(key, oldValue) + + if err := t.Commit(); err != nil { + return 0, err + } + + return int64(len(oldValue)), nil +} + +func getRange(start int, end int, valLen int) (int, int) { + if start < 0 { + start = valLen + start + } + + if end < 0 { + end = valLen + end + } + + if start < 0 { + start = 0 + } + + if end < 0 { + end = 0 + } + + if end >= valLen { + end = valLen - 1 + } + return start, end +} + +func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + key = db.encodeKVKey(key) + + value, err := db.bucket.Get(key) + if err != nil { + return nil, err + } + + valLen := len(value) + + start, end = getRange(start, end, valLen) + + if start > end { + return nil, nil + } + + return value[start : end+1], nil +} + +func (db *DB) StrLen(key []byte) (int64, error) { + s, err := db.GetSlice(key) + if err != nil { + return 0, err + } + + n := s.Size() + s.Free() + return int64(n), nil +} + +func (db *DB) Append(key []byte, value []byte) (int64, error) { + if len(value) == 0 { + return 0, nil + } + + if err := checkKeySize(key); err != nil { + return 0, err + } + key = db.encodeKVKey(key) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + oldValue, err := db.bucket.Get(key) + if err != nil { + return 0, err + } + + if len(oldValue)+len(value) > MaxValueSize { + return 0, errValueSize + } + + oldValue = append(oldValue, value...) + + t.Put(key, oldValue) + + if err := t.Commit(); err != nil { + return 0, nil + } + + return int64(len(oldValue)), nil +} + +func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) { + if err := checkKeySize(destKey); err != nil { + return 0, err + } + + op = strings.ToLower(op) + if len(srcKeys) == 0 { + return 0, nil + } else if op == BitNot && len(srcKeys) > 1 { + return 0, fmt.Errorf("BITOP NOT has only one srckey") + } else if len(srcKeys) < 2 { + return 0, nil + } + + key := db.encodeKVKey(srcKeys[0]) + + value, err := db.bucket.Get(key) + if err != nil { + return 0, err + } + + if op == BitNot { + for i := 0; i < len(value); i++ { + value[i] = ^value[i] + } + } else { + for j := 1; j < len(srcKeys); j++ { + if err := checkKeySize(srcKeys[j]); err != nil { + return 0, err + } + + key = db.encodeKVKey(srcKeys[j]) + ovalue, err := db.bucket.Get(key) + if err != nil { + return 0, err + } + + if len(value) < len(ovalue) { + value, ovalue = ovalue, value + } + + for i := 0; i < len(ovalue); i++ { + switch op { + case BitAND: + value[i] &= ovalue[i] + case BitOR: + value[i] |= ovalue[i] + case BitXOR: + value[i] ^= ovalue[i] + default: + return 0, fmt.Errorf("invalid op type: %s", op) + } + } + + for i := len(ovalue); i < len(value); i++ { + switch op { + case BitAND: + value[i] &= 0 + case BitOR: + value[i] |= 0 + case BitXOR: + value[i] ^= 0 + } + } + } + } + + key = db.encodeKVKey(destKey) + + t := db.kvBatch + + t.Lock() + defer t.Unlock() + + t.Put(key, value) + + if err := t.Commit(); err != nil { + return 0, err + } + + return int64(len(value)), nil +} + +var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, + 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, + 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, + 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, + 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, + 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, + 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 
3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3,
+	4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4,
+	5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
+	6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
+	6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}
+
+func numberBitCount(i uint32) uint32 {
+	i = i - ((i >> 1) & 0x55555555)
+	i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
+	return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
+}
+
+func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
+	if err := checkKeySize(key); err != nil {
+		return 0, err
+	}
+
+	key = db.encodeKVKey(key)
+	value, err := db.bucket.Get(key)
+	if err != nil {
+		return 0, err
+	}
+
+	start, end = getRange(start, end, len(value))
+	value = value[start : end+1]
+
+	var n int64 = 0
+
+	pos := 0
+	for ; pos+4 <= len(value); pos = pos + 4 {
+		n += int64(numberBitCount(binary.BigEndian.Uint32(value[pos : pos+4])))
+	}
+
+	for ; pos < len(value); pos++ {
+		n += int64(bitsInByte[value[pos]])
+	}
+
+	return n, nil
+}
+
+func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
+	if err := checkKeySize(key); err != nil {
+		return 0, err
+	}
+
+	if (on & ^1) != 0 {
+		return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
+	}
+
+	var skipValue uint8 = 0
+	if on == 0 {
+		skipValue = 0xFF
+	}
+
+	key = db.encodeKVKey(key)
+	value, err := db.bucket.Get(key)
+	if err != nil {
+		return 0, err
+	}
+
+	start, end = getRange(start, end, len(value))
+	value = value[start : end+1]
+
+	for i, v := range value {
+		if uint8(v) != skipValue {
+			for j := 0; j < 8; j++ {
+				isNull := uint8(v)&(1<<uint8(7-j)) == 0
+
+				if on == 1 && !isNull {
+					return int64((start+i)*8 + j), nil
+				} else if on == 0 && isNull {
+					return int64((start+i)*8 + j), nil
+				}
+			}
+		}
+	}
+
+	return -1, nil
+}
+
+func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) {
+	if err := checkKeySize(key); err != nil {
+		return 0, err
+	}
+
+	if (on & ^1) != 0 {
+		return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
+	}
+
+	t := db.kvBatch
+
+	t.Lock()
+	defer t.Unlock()
+
+	key = db.encodeKVKey(key)
+
+	value, err := db.bucket.Get(key)
+	if err != nil {
+		return 0, err
+	}
+
+	byteOffset := int(uint32(offset) >> 3)
+	extra := byteOffset + 1 - len(value)
+	if extra > 0 {
+		value = append(value, make([]byte, extra)...)
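+		// zero-pad the value so the byte containing the target bit exists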
+ } + + byteVal := value[byteOffset] + bit := 7 - uint8(uint32(offset)&0x7) + bitVal := byteVal & (1 << bit) + + byteVal &= ^(1 << bit) + byteVal |= (uint8(on&0x1) << bit) + + value[byteOffset] = byteVal + + t.Put(key, value) + if err := t.Commit(); err != nil { + return 0, err + } + + if bitVal > 0 { + return 1, nil + } else { + return 0, nil + } +} + +func (db *DB) GetBit(key []byte, offset int) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + key = db.encodeKVKey(key) + + value, err := db.bucket.Get(key) + if err != nil { + return 0, err + } + + byteOffset := uint32(offset) >> 3 + bit := 7 - uint8(uint32(offset)&0x7) + + if byteOffset >= uint32(len(value)) { + return 0, nil + } + + bitVal := value[byteOffset] & (1 << bit) + if bitVal > 0 { + return 1, nil + } else { + return 0, nil + } +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_list.go b/vendor/github.com/siddontang/ledisdb/ledis/t_list.go new file mode 100644 index 000000000000..83ed56e57fc2 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_list.go @@ -0,0 +1,783 @@ +package ledis + +import ( + "container/list" + "encoding/binary" + "errors" + "sync" + "time" + + "github.com/siddontang/go/hack" + "github.com/siddontang/go/log" + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/store" + "golang.org/x/net/context" +) + +const ( + listHeadSeq int32 = 1 + listTailSeq int32 = 2 + + listMinSeq int32 = 1000 + listMaxSeq int32 = 1<<31 - 1000 + listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2 +) + +var errLMetaKey = errors.New("invalid lmeta key") +var errListKey = errors.New("invalid list key") +var errListSeq = errors.New("invalid list sequence, overflow") + +func (db *DB) lEncodeMetaKey(key []byte) []byte { + buf := make([]byte, len(key)+1+len(db.indexVarBuf)) + pos := copy(buf, db.indexVarBuf) + buf[pos] = LMetaType + pos++ + + copy(buf[pos:], key) + return buf +} + +func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, err + } + + if pos+1 > len(ek) || ek[pos] != LMetaType { + return nil, errLMetaKey + } + + pos++ + return ek[pos:], nil +} + +func (db *DB) lEncodeListKey(key []byte, seq int32) []byte { + buf := make([]byte, len(key)+7+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + + buf[pos] = ListType + pos++ + + binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) + pos += 2 + + copy(buf[pos:], key) + pos += len(key) + + binary.BigEndian.PutUint32(buf[pos:], uint32(seq)) + + return buf +} + +func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) { + pos := 0 + pos, err = db.checkKeyIndex(ek) + if err != nil { + return + } + + if pos+1 > len(ek) || ek[pos] != ListType { + err = errListKey + return + } + + pos++ + + if pos+2 > len(ek) { + err = errListKey + return + } + + keyLen := int(binary.BigEndian.Uint16(ek[pos:])) + pos += 2 + if keyLen+pos+4 != len(ek) { + err = errListKey + return + } + + key = ek[pos : pos+keyLen] + seq = int32(binary.BigEndian.Uint32(ek[pos+keyLen:])) + return +} + +func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + var headSeq int32 + var tailSeq int32 + var size int32 + var err error + + t := db.listBatch + t.Lock() + defer t.Unlock() + + metaKey := db.lEncodeMetaKey(key) + headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) + if err != nil { + return 0, err + } + + var pushCnt int = len(args) + if pushCnt == 0 
{ + return int64(size), nil + } + + var seq int32 = headSeq + var delta int32 = -1 + if whereSeq == listTailSeq { + seq = tailSeq + delta = 1 + } + + // append elements + if size > 0 { + seq += delta + } + + for i := 0; i < pushCnt; i++ { + ek := db.lEncodeListKey(key, seq+int32(i)*delta) + t.Put(ek, args[i]) + } + + seq += int32(pushCnt-1) * delta + if seq <= listMinSeq || seq >= listMaxSeq { + return 0, errListSeq + } + + // set meta info + if whereSeq == listHeadSeq { + headSeq = seq + } else { + tailSeq = seq + } + + db.lSetMeta(metaKey, headSeq, tailSeq) + + err = t.Commit() + + if err == nil { + db.lSignalAsReady(key) + } + + return int64(size) + int64(pushCnt), err +} + +func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + t := db.listBatch + t.Lock() + defer t.Unlock() + + var headSeq int32 + var tailSeq int32 + var size int32 + var err error + + metaKey := db.lEncodeMetaKey(key) + headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) + if err != nil { + return nil, err + } else if size == 0 { + return nil, nil + } + + var value []byte + + var seq int32 = headSeq + if whereSeq == listTailSeq { + seq = tailSeq + } + + itemKey := db.lEncodeListKey(key, seq) + value, err = db.bucket.Get(itemKey) + if err != nil { + return nil, err + } + + if whereSeq == listHeadSeq { + headSeq += 1 + } else { + tailSeq -= 1 + } + + t.Delete(itemKey) + size = db.lSetMeta(metaKey, headSeq, tailSeq) + if size == 0 { + db.rmExpire(t, ListType, key) + } + + err = t.Commit() + return value, err +} + +func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) { + if err := checkKeySize(key); err != nil { + return err + } + + t := db.listBatch + t.Lock() + defer t.Unlock() + + var headSeq int32 + var llen int32 + start := int32(startP) + stop := int32(stopP) + + ek := db.lEncodeMetaKey(key) + if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil { + return err + } else { + if start < 0 { + start = llen + start + } + if stop < 0 { + stop = llen + stop + } + if start >= llen || start > stop { + db.lDelete(t, key) + db.rmExpire(t, ListType, key) + return t.Commit() + } + + if start < 0 { + start = 0 + } + if stop >= llen { + stop = llen - 1 + } + } + + if start > 0 { + for i := int32(0); i < start; i++ { + t.Delete(db.lEncodeListKey(key, headSeq+i)) + } + } + if stop < int32(llen-1) { + for i := int32(stop + 1); i < llen; i++ { + t.Delete(db.lEncodeListKey(key, headSeq+i)) + } + } + + db.lSetMeta(ek, headSeq+start, headSeq+stop) + + return t.Commit() +} + +func (db *DB) ltrim(key []byte, trimSize, whereSeq int32) (int32, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + if trimSize == 0 { + return 0, nil + } + + t := db.listBatch + t.Lock() + defer t.Unlock() + + var headSeq int32 + var tailSeq int32 + var size int32 + var err error + + metaKey := db.lEncodeMetaKey(key) + headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) + if err != nil { + return 0, err + } else if size == 0 { + return 0, nil + } + + var ( + trimStartSeq int32 + trimEndSeq int32 + ) + + if whereSeq == listHeadSeq { + trimStartSeq = headSeq + trimEndSeq = num.MinInt32(trimStartSeq+trimSize-1, tailSeq) + headSeq = trimEndSeq + 1 + } else { + trimEndSeq = tailSeq + trimStartSeq = num.MaxInt32(trimEndSeq-trimSize+1, headSeq) + tailSeq = trimStartSeq - 1 + } + + for trimSeq := trimStartSeq; trimSeq <= trimEndSeq; trimSeq++ { + itemKey := db.lEncodeListKey(key, trimSeq) + t.Delete(itemKey) + } + + size = 
db.lSetMeta(metaKey, headSeq, tailSeq) + if size == 0 { + db.rmExpire(t, ListType, key) + } + + err = t.Commit() + return trimEndSeq - trimStartSeq + 1, err +} + +// ps : here just focus on deleting the list data, +// any other likes expire is ignore. +func (db *DB) lDelete(t *batch, key []byte) int64 { + mk := db.lEncodeMetaKey(key) + + var headSeq int32 + var tailSeq int32 + var err error + + it := db.bucket.NewIterator() + defer it.Close() + + headSeq, tailSeq, _, err = db.lGetMeta(it, mk) + if err != nil { + return 0 + } + + var num int64 = 0 + startKey := db.lEncodeListKey(key, headSeq) + stopKey := db.lEncodeListKey(key, tailSeq) + + rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose}) + for ; rit.Valid(); rit.Next() { + t.Delete(rit.RawKey()) + num++ + } + + t.Delete(mk) + + return num +} + +func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) { + var v []byte + if it != nil { + v = it.Find(ek) + } else { + v, err = db.bucket.Get(ek) + } + if err != nil { + return + } else if v == nil { + headSeq = listInitialSeq + tailSeq = listInitialSeq + size = 0 + return + } else { + headSeq = int32(binary.LittleEndian.Uint32(v[0:4])) + tailSeq = int32(binary.LittleEndian.Uint32(v[4:8])) + size = tailSeq - headSeq + 1 + } + return +} + +func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 { + t := db.listBatch + + var size int32 = tailSeq - headSeq + 1 + if size < 0 { + // todo : log error + panic + log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq) + } else if size == 0 { + t.Delete(ek) + } else { + buf := make([]byte, 8) + + binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq)) + binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq)) + + t.Put(ek, buf) + } + + return size +} + +func (db *DB) lExpireAt(key []byte, when int64) (int64, error) { + t := db.listBatch + t.Lock() + defer t.Unlock() + + if llen, err := db.LLen(key); err != nil || llen == 0 { + return 0, err + } else { + db.expireAt(t, ListType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + } + return 1, nil +} + +func (db *DB) LIndex(key []byte, index int32) ([]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + var seq int32 + var headSeq int32 + var tailSeq int32 + var err error + + metaKey := db.lEncodeMetaKey(key) + + it := db.bucket.NewIterator() + defer it.Close() + + headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey) + if err != nil { + return nil, err + } + + if index >= 0 { + seq = headSeq + index + } else { + seq = tailSeq + index + 1 + } + + sk := db.lEncodeListKey(key, seq) + v := it.Find(sk) + + return v, nil +} + +func (db *DB) LLen(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + ek := db.lEncodeMetaKey(key) + _, _, size, err := db.lGetMeta(nil, ek) + return int64(size), err +} + +func (db *DB) LPop(key []byte) ([]byte, error) { + return db.lpop(key, listHeadSeq) +} + +func (db *DB) LTrim(key []byte, start, stop int64) error { + return db.ltrim2(key, start, stop) +} + +func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) { + return db.ltrim(key, trimSize, listHeadSeq) +} + +func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) { + return db.ltrim(key, trimSize, listTailSeq) +} + +func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) { + return db.lpush(key, listHeadSeq, args...) 
+} +func (db *DB) LSet(key []byte, index int32, value []byte) error { + if err := checkKeySize(key); err != nil { + return err + } + + var seq int32 + var headSeq int32 + var tailSeq int32 + //var size int32 + var err error + t := db.listBatch + t.Lock() + defer t.Unlock() + metaKey := db.lEncodeMetaKey(key) + + headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey) + if err != nil { + return err + } + + if index >= 0 { + seq = headSeq + index + } else { + seq = tailSeq + index + 1 + } + if seq < headSeq || seq > tailSeq { + return errListIndex + } + sk := db.lEncodeListKey(key, seq) + t.Put(sk, value) + err = t.Commit() + return err +} + +func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + var headSeq int32 + var llen int32 + var err error + + metaKey := db.lEncodeMetaKey(key) + + it := db.bucket.NewIterator() + defer it.Close() + + if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil { + return nil, err + } + + if start < 0 { + start = llen + start + } + if stop < 0 { + stop = llen + stop + } + if start < 0 { + start = 0 + } + + if start > stop || start >= llen { + return [][]byte{}, nil + } + + if stop >= llen { + stop = llen - 1 + } + + limit := (stop - start) + 1 + headSeq += start + + v := make([][]byte, 0, limit) + + startKey := db.lEncodeListKey(key, headSeq) + rit := store.NewRangeLimitIterator(it, + &store.Range{ + Min: startKey, + Max: nil, + Type: store.RangeClose}, + &store.Limit{ + Offset: 0, + Count: int(limit)}) + + for ; rit.Valid(); rit.Next() { + v = append(v, rit.Value()) + } + + return v, nil +} + +func (db *DB) RPop(key []byte) ([]byte, error) { + return db.lpop(key, listTailSeq) +} + +func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) { + return db.lpush(key, listTailSeq, args...) 
+} + +func (db *DB) LClear(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.listBatch + t.Lock() + defer t.Unlock() + + num := db.lDelete(t, key) + db.rmExpire(t, ListType, key) + + err := t.Commit() + return num, err +} + +func (db *DB) LMclear(keys ...[]byte) (int64, error) { + t := db.listBatch + t.Lock() + defer t.Unlock() + + for _, key := range keys { + if err := checkKeySize(key); err != nil { + return 0, err + } + + db.lDelete(t, key) + db.rmExpire(t, ListType, key) + + } + + err := t.Commit() + return int64(len(keys)), err +} + +func (db *DB) lFlush() (drop int64, err error) { + t := db.listBatch + t.Lock() + defer t.Unlock() + return db.flushType(t, ListType) +} + +func (db *DB) LExpire(key []byte, duration int64) (int64, error) { + if duration <= 0 { + return 0, errExpireValue + } + + return db.lExpireAt(key, time.Now().Unix()+duration) +} + +func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { + if when <= time.Now().Unix() { + return 0, errExpireValue + } + + return db.lExpireAt(key, when) +} + +func (db *DB) LTTL(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return -1, err + } + + return db.ttl(ListType, key) +} + +func (db *DB) LPersist(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.listBatch + t.Lock() + defer t.Unlock() + + n, err := db.rmExpire(t, ListType, key) + if err != nil { + return 0, err + } + + err = t.Commit() + return n, err +} + +func (db *DB) lEncodeMinKey() []byte { + return db.lEncodeMetaKey(nil) +} + +func (db *DB) lEncodeMaxKey() []byte { + ek := db.lEncodeMetaKey(nil) + ek[len(ek)-1] = LMetaType + 1 + return ek +} + +func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { + return db.lblockPop(keys, listHeadSeq, timeout) +} + +func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { + return db.lblockPop(keys, listTailSeq, timeout) +} + +func (db *DB) LKeyExists(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + sk := db.lEncodeMetaKey(key) + v, err := db.bucket.Get(sk) + if v != nil && err == nil { + return 1, nil + } + return 0, err +} + +func (db *DB) lblockPop(keys [][]byte, whereSeq int32, timeout time.Duration) ([]interface{}, error) { + for { + var ctx context.Context + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(context.Background(), timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + for _, key := range keys { + v, err := db.lbkeys.popOrWait(db, key, whereSeq, cancel) + + if err != nil { + cancel() + return nil, err + } else if v != nil { + cancel() + return []interface{}{key, v}, nil + } + } + + //blocking wait + <-ctx.Done() + cancel() + + //if ctx.Err() is a deadline exceeded (timeout) we return + //otherwise we try to pop one of the keys again. 
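+		// (a push to any watched key cancels this context through
+		// lSignalAsReady, so ctx.Err() is Canceled and we retry the pop)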
+ if ctx.Err() == context.DeadlineExceeded { + return nil, nil + } + } +} + +func (db *DB) lSignalAsReady(key []byte) { + db.lbkeys.signal(key) +} + +type lBlockKeys struct { + sync.Mutex + + keys map[string]*list.List +} + +func newLBlockKeys() *lBlockKeys { + l := new(lBlockKeys) + + l.keys = make(map[string]*list.List) + return l +} + +func (l *lBlockKeys) signal(key []byte) { + l.Lock() + defer l.Unlock() + + s := hack.String(key) + fns, ok := l.keys[s] + if !ok { + return + } + for e := fns.Front(); e != nil; e = e.Next() { + fn := e.Value.(context.CancelFunc) + fn() + } + + delete(l.keys, s) +} + +func (l *lBlockKeys) popOrWait(db *DB, key []byte, whereSeq int32, fn context.CancelFunc) ([]interface{}, error) { + v, err := db.lpop(key, whereSeq) + if err != nil { + return nil, err + } else if v != nil { + return []interface{}{key, v}, nil + } + + l.Lock() + + s := hack.String(key) + chs, ok := l.keys[s] + if !ok { + chs = list.New() + l.keys[s] = chs + } + + chs.PushBack(fn) + l.Unlock() + return nil, nil +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_set.go b/vendor/github.com/siddontang/ledisdb/ledis/t_set.go new file mode 100644 index 000000000000..a4eaf9513192 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_set.go @@ -0,0 +1,627 @@ +package ledis + +import ( + "encoding/binary" + "errors" + "time" + + "github.com/siddontang/go/hack" + "github.com/siddontang/ledisdb/store" +) + +var errSetKey = errors.New("invalid set key") +var errSSizeKey = errors.New("invalid ssize key") + +const ( + setStartSep byte = ':' + setStopSep byte = setStartSep + 1 + UnionType byte = 51 + DiffType byte = 52 + InterType byte = 53 +) + +func checkSetKMSize(key []byte, member []byte) error { + if len(key) > MaxKeySize || len(key) == 0 { + return errKeySize + } else if len(member) > MaxSetMemberSize || len(member) == 0 { + return errSetMemberSize + } + return nil +} + +func (db *DB) sEncodeSizeKey(key []byte) []byte { + buf := make([]byte, len(key)+1+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + buf[pos] = SSizeType + + pos++ + + copy(buf[pos:], key) + return buf +} + +func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, err + } + + if pos+1 > len(ek) || ek[pos] != SSizeType { + return nil, errSSizeKey + } + pos++ + + return ek[pos:], nil +} + +func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte { + buf := make([]byte, len(key)+len(member)+1+1+2+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + + buf[pos] = SetType + pos++ + + binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) + pos += 2 + + copy(buf[pos:], key) + pos += len(key) + + buf[pos] = setStartSep + pos++ + copy(buf[pos:], member) + + return buf +} + +func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, nil, err + } + + if pos+1 > len(ek) || ek[pos] != SetType { + return nil, nil, errSetKey + } + + pos++ + + if pos+2 > len(ek) { + return nil, nil, errSetKey + } + + keyLen := int(binary.BigEndian.Uint16(ek[pos:])) + pos += 2 + + if keyLen+pos > len(ek) { + return nil, nil, errSetKey + } + + key := ek[pos : pos+keyLen] + pos += keyLen + + if ek[pos] != hashStartSep { + return nil, nil, errSetKey + } + + pos++ + member := ek[pos:] + return key, member, nil +} + +func (db *DB) sEncodeStartKey(key []byte) []byte { + return db.sEncodeSetKey(key, nil) +} + +func (db *DB) sEncodeStopKey(key []byte) []byte { + k := db.sEncodeSetKey(key, 
nil) + + k[len(k)-1] = setStopSep + + return k +} + +func (db *DB) sFlush() (drop int64, err error) { + + t := db.setBatch + t.Lock() + defer t.Unlock() + + return db.flushType(t, SetType) +} + +func (db *DB) sDelete(t *batch, key []byte) int64 { + sk := db.sEncodeSizeKey(key) + start := db.sEncodeStartKey(key) + stop := db.sEncodeStopKey(key) + + var num int64 = 0 + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + for ; it.Valid(); it.Next() { + t.Delete(it.RawKey()) + num++ + } + + it.Close() + + t.Delete(sk) + return num +} + +func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) { + t := db.setBatch + sk := db.sEncodeSizeKey(key) + + var err error + var size int64 = 0 + if size, err = Int64(db.bucket.Get(sk)); err != nil { + return 0, err + } else { + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, SetType, key) + } else { + t.Put(sk, PutInt64(size)) + } + } + + return size, nil +} + +func (db *DB) sExpireAt(key []byte, when int64) (int64, error) { + t := db.setBatch + t.Lock() + defer t.Unlock() + + if scnt, err := db.SCard(key); err != nil || scnt == 0 { + return 0, err + } else { + db.expireAt(t, SetType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + + } + + return 1, nil +} + +func (db *DB) sSetItem(key []byte, member []byte) (int64, error) { + t := db.setBatch + ek := db.sEncodeSetKey(key, member) + + var n int64 = 1 + if v, _ := db.bucket.Get(ek); v != nil { + n = 0 + } else { + if _, err := db.sIncrSize(key, 1); err != nil { + return 0, err + } + } + + t.Put(ek, nil) + return n, nil +} + +func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { + t := db.setBatch + t.Lock() + defer t.Unlock() + + var err error + var ek []byte + var num int64 = 0 + for i := 0; i < len(args); i++ { + if err := checkSetKMSize(key, args[i]); err != nil { + return 0, err + } + + ek = db.sEncodeSetKey(key, args[i]) + + if v, err := db.bucket.Get(ek); err != nil { + return 0, err + } else if v == nil { + num++ + } + + t.Put(ek, nil) + } + + if _, err = db.sIncrSize(key, num); err != nil { + return 0, err + } + + err = t.Commit() + return num, err + +} + +func (db *DB) SCard(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + sk := db.sEncodeSizeKey(key) + + return Int64(db.bucket.Get(sk)) +} + +func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { + destMap := make(map[string]bool) + + members, err := db.SMembers(keys[0]) + if err != nil { + return nil, err + } + + for _, m := range members { + destMap[hack.String(m)] = true + } + + for _, k := range keys[1:] { + members, err := db.SMembers(k) + if err != nil { + return nil, err + } + + for _, m := range members { + if _, ok := destMap[hack.String(m)]; !ok { + continue + } else if ok { + delete(destMap, hack.String(m)) + } + } + // O - A = O, O is zero set. + if len(destMap) == 0 { + return nil, nil + } + } + + slice := make([][]byte, len(destMap)) + idx := 0 + for k, v := range destMap { + if !v { + continue + } + slice[idx] = []byte(k) + idx++ + } + + return slice, nil +} + +func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { + v, err := db.sDiffGeneric(keys...) + return v, err +} + +func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { + n, err := db.sStoreGeneric(dstKey, DiffType, keys...) 
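+	// sStoreGeneric clears dstKey first, then writes the computed diff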
+ return n, err +} + +func (db *DB) SKeyExists(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + sk := db.sEncodeSizeKey(key) + v, err := db.bucket.Get(sk) + if v != nil && err == nil { + return 1, nil + } + return 0, err +} + +func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { + destMap := make(map[string]bool) + + members, err := db.SMembers(keys[0]) + if err != nil { + return nil, err + } + + for _, m := range members { + destMap[hack.String(m)] = true + } + + for _, key := range keys[1:] { + if err := checkKeySize(key); err != nil { + return nil, err + } + + members, err := db.SMembers(key) + if err != nil { + return nil, err + } else if len(members) == 0 { + return nil, err + } + + tempMap := make(map[string]bool) + for _, member := range members { + if err := checkKeySize(member); err != nil { + return nil, err + } + if _, ok := destMap[hack.String(member)]; ok { + tempMap[hack.String(member)] = true //mark this item as selected + } + } + destMap = tempMap //reduce the size of the result set + if len(destMap) == 0 { + return nil, nil + } + } + + slice := make([][]byte, len(destMap)) + idx := 0 + for k, v := range destMap { + if !v { + continue + } + + slice[idx] = []byte(k) + idx++ + } + + return slice, nil + +} + +func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { + v, err := db.sInterGeneric(keys...) + return v, err + +} + +func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { + n, err := db.sStoreGeneric(dstKey, InterType, keys...) + return n, err +} + +func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { + ek := db.sEncodeSetKey(key, member) + + var n int64 = 1 + if v, err := db.bucket.Get(ek); err != nil { + return 0, err + } else if v == nil { + n = 0 + } + return n, nil +} + +func (db *DB) SMembers(key []byte) ([][]byte, error) { + if err := checkKeySize(key); err != nil { + return nil, err + } + + start := db.sEncodeStartKey(key) + stop := db.sEncodeStopKey(key) + + v := make([][]byte, 0, 16) + + it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) + defer it.Close() + + for ; it.Valid(); it.Next() { + _, m, err := db.sDecodeSetKey(it.Key()) + if err != nil { + return nil, err + } + + v = append(v, m) + } + + return v, nil +} + +func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { + t := db.setBatch + t.Lock() + defer t.Unlock() + + var ek []byte + var v []byte + var err error + + it := db.bucket.NewIterator() + defer it.Close() + + var num int64 = 0 + for i := 0; i < len(args); i++ { + if err := checkSetKMSize(key, args[i]); err != nil { + return 0, err + } + + ek = db.sEncodeSetKey(key, args[i]) + + v = it.RawFind(ek) + if v == nil { + continue + } else { + num++ + t.Delete(ek) + } + } + + if _, err = db.sIncrSize(key, -num); err != nil { + return 0, err + } + + err = t.Commit() + return num, err + +} + +func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { + dstMap := make(map[string]bool) + + for _, key := range keys { + if err := checkKeySize(key); err != nil { + return nil, err + } + + members, err := db.SMembers(key) + if err != nil { + return nil, err + } + + for _, member := range members { + dstMap[hack.String(member)] = true + } + } + + slice := make([][]byte, len(dstMap)) + idx := 0 + for k, v := range dstMap { + if !v { + continue + } + slice[idx] = []byte(k) + idx++ + } + + return slice, nil +} + +func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { + v, err := db.sUnionGeneric(keys...) 
+ return v, err +} + +func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { + n, err := db.sStoreGeneric(dstKey, UnionType, keys...) + return n, err +} + +func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) { + if err := checkKeySize(dstKey); err != nil { + return 0, err + } + + t := db.setBatch + t.Lock() + defer t.Unlock() + + db.sDelete(t, dstKey) + + var err error + var ek []byte + var v [][]byte + + switch optType { + case UnionType: + v, err = db.sUnionGeneric(keys...) + case DiffType: + v, err = db.sDiffGeneric(keys...) + case InterType: + v, err = db.sInterGeneric(keys...) + } + + if err != nil { + return 0, err + } + + for _, m := range v { + if err := checkSetKMSize(dstKey, m); err != nil { + return 0, err + } + + ek = db.sEncodeSetKey(dstKey, m) + + if _, err := db.bucket.Get(ek); err != nil { + return 0, err + } + + t.Put(ek, nil) + } + + var n = int64(len(v)) + sk := db.sEncodeSizeKey(dstKey) + t.Put(sk, PutInt64(n)) + + if err = t.Commit(); err != nil { + return 0, err + } + return n, nil +} + +func (db *DB) SClear(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.setBatch + t.Lock() + defer t.Unlock() + + num := db.sDelete(t, key) + db.rmExpire(t, SetType, key) + + err := t.Commit() + return num, err +} + +func (db *DB) SMclear(keys ...[]byte) (int64, error) { + t := db.setBatch + t.Lock() + defer t.Unlock() + + for _, key := range keys { + if err := checkKeySize(key); err != nil { + return 0, err + } + + db.sDelete(t, key) + db.rmExpire(t, SetType, key) + } + + err := t.Commit() + return int64(len(keys)), err +} + +func (db *DB) SExpire(key []byte, duration int64) (int64, error) { + if duration <= 0 { + return 0, errExpireValue + } + + return db.sExpireAt(key, time.Now().Unix()+duration) + +} + +func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { + if when <= time.Now().Unix() { + return 0, errExpireValue + } + + return db.sExpireAt(key, when) + +} + +func (db *DB) STTL(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return -1, err + } + + return db.ttl(SetType, key) +} + +func (db *DB) SPersist(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.setBatch + t.Lock() + defer t.Unlock() + + n, err := db.rmExpire(t, SetType, key) + if err != nil { + return 0, err + } + err = t.Commit() + return n, err +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go b/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go new file mode 100644 index 000000000000..2c979ae342ed --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go @@ -0,0 +1,213 @@ +package ledis + +import ( + "encoding/binary" + "errors" + "sync" + "time" + + "github.com/siddontang/ledisdb/store" +) + +var ( + errExpMetaKey = errors.New("invalid expire meta key") + errExpTimeKey = errors.New("invalid expire time key") +) + +type onExpired func(*batch, []byte) int64 + +type ttlChecker struct { + sync.Mutex + db *DB + txs []*batch + cbs []onExpired + + //next check time + nc int64 +} + +var errExpType = errors.New("invalid expire type") + +func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte { + buf := make([]byte, len(key)+10+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + + buf[pos] = ExpTimeType + pos++ + + binary.BigEndian.PutUint64(buf[pos:], uint64(when)) + pos += 8 + + buf[pos] = dataType + pos++ + + copy(buf[pos:], key) + + return buf +} + +func (db *DB) 
expEncodeMetaKey(dataType byte, key []byte) []byte { + buf := make([]byte, len(key)+2+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + buf[pos] = ExpMetaType + pos++ + buf[pos] = dataType + pos++ + + copy(buf[pos:], key) + + return buf +} + +func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) { + pos, err := db.checkKeyIndex(mk) + if err != nil { + return 0, nil, err + } + + if pos+2 > len(mk) || mk[pos] != ExpMetaType { + return 0, nil, errExpMetaKey + } + + return mk[pos+1], mk[pos+2:], nil +} + +func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) { + pos, err := db.checkKeyIndex(tk) + if err != nil { + return 0, nil, 0, err + } + + if pos+10 > len(tk) || tk[pos] != ExpTimeType { + return 0, nil, 0, errExpTimeKey + } + + return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil +} + +func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) { + db.expireAt(t, dataType, key, time.Now().Unix()+duration) +} + +func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) { + mk := db.expEncodeMetaKey(dataType, key) + tk := db.expEncodeTimeKey(dataType, key, when) + + t.Put(tk, mk) + t.Put(mk, PutInt64(when)) + + db.ttlChecker.setNextCheckTime(when, false) +} + +func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) { + mk := db.expEncodeMetaKey(dataType, key) + + if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 { + t = -1 + } else { + t -= time.Now().Unix() + if t <= 0 { + t = -1 + } + // if t == -1 : to remove ???? + } + + return t, err +} + +func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { + mk := db.expEncodeMetaKey(dataType, key) + if v, err := db.bucket.Get(mk); err != nil { + return 0, err + } else if v == nil { + return 0, nil + } else if when, err2 := Int64(v, nil); err2 != nil { + return 0, err2 + } else { + tk := db.expEncodeTimeKey(dataType, key, when) + t.Delete(mk) + t.Delete(tk) + return 1, nil + } +} + +func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) { + c.txs[dataType] = t + c.cbs[dataType] = f +} + +func (c *ttlChecker) setNextCheckTime(when int64, force bool) { + c.Lock() + if force { + c.nc = when + } else if c.nc > when { + c.nc = when + } + c.Unlock() +} + +func (c *ttlChecker) check() { + now := time.Now().Unix() + + c.Lock() + nc := c.nc + c.Unlock() + + if now < nc { + return + } + + nc = now + 3600 + + db := c.db + dbGet := db.bucket.Get + + minKey := db.expEncodeTimeKey(NoneType, nil, 0) + maxKey := db.expEncodeTimeKey(maxDataType, nil, nc) + + it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1) + for ; it.Valid(); it.Next() { + tk := it.RawKey() + mk := it.RawValue() + + dt, k, nt, err := db.expDecodeTimeKey(tk) + if err != nil { + continue + } + + if nt > now { + //the next ttl check time is nt! 
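+			// time keys sort by their big-endian timestamp, so entries
+			// are visited in expiry order and the scan can stop here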
+ nc = nt + break + } + + t := c.txs[dt] + cb := c.cbs[dt] + if tk == nil || cb == nil { + continue + } + + t.Lock() + + if exp, err := Int64(dbGet(mk)); err == nil { + // check expire again + if exp <= now { + cb(t, k) + t.Delete(tk) + t.Delete(mk) + + t.Commit() + } + + } + + t.Unlock() + } + it.Close() + + c.setNextCheckTime(nc, true) + + return +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go b/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go new file mode 100644 index 000000000000..fe11df872300 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go @@ -0,0 +1,1063 @@ +package ledis + +import ( + "bytes" + "encoding/binary" + "errors" + "time" + + "github.com/siddontang/go/hack" + "github.com/siddontang/ledisdb/store" +) + +const ( + MinScore int64 = -1<<63 + 1 + MaxScore int64 = 1<<63 - 1 + InvalidScore int64 = -1 << 63 + + AggregateSum byte = 0 + AggregateMin byte = 1 + AggregateMax byte = 2 +) + +type ScorePair struct { + Score int64 + Member []byte +} + +var errZSizeKey = errors.New("invalid zsize key") +var errZSetKey = errors.New("invalid zset key") +var errZScoreKey = errors.New("invalid zscore key") +var errScoreOverflow = errors.New("zset score overflow") +var errInvalidAggregate = errors.New("invalid aggregate") +var errInvalidWeightNum = errors.New("invalid weight number") +var errInvalidSrcKeyNum = errors.New("invalid src key number") + +const ( + zsetNScoreSep byte = '<' + zsetPScoreSep byte = zsetNScoreSep + 1 + zsetStopScoreSep byte = zsetPScoreSep + 1 + + zsetStartMemSep byte = ':' + zsetStopMemSep byte = zsetStartMemSep + 1 +) + +func checkZSetKMSize(key []byte, member []byte) error { + if len(key) > MaxKeySize || len(key) == 0 { + return errKeySize + } else if len(member) > MaxZSetMemberSize || len(member) == 0 { + return errZSetMemberSize + } + return nil +} + +func (db *DB) zEncodeSizeKey(key []byte) []byte { + buf := make([]byte, len(key)+1+len(db.indexVarBuf)) + pos := copy(buf, db.indexVarBuf) + buf[pos] = ZSizeType + pos++ + copy(buf[pos:], key) + return buf +} + +func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, err + } + + if pos+1 > len(ek) || ek[pos] != ZSizeType { + return nil, errZSizeKey + } + pos++ + return ek[pos:], nil +} + +func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte { + buf := make([]byte, len(key)+len(member)+4+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + + buf[pos] = ZSetType + pos++ + + binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) + pos += 2 + + copy(buf[pos:], key) + pos += len(key) + + buf[pos] = zsetStartMemSep + pos++ + + copy(buf[pos:], member) + + return buf +} + +func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) { + pos, err := db.checkKeyIndex(ek) + if err != nil { + return nil, nil, err + } + + if pos+1 > len(ek) || ek[pos] != ZSetType { + return nil, nil, errZSetKey + } + + pos++ + + if pos+2 > len(ek) { + return nil, nil, errZSetKey + } + + keyLen := int(binary.BigEndian.Uint16(ek[pos:])) + if keyLen+pos > len(ek) { + return nil, nil, errZSetKey + } + + pos += 2 + key := ek[pos : pos+keyLen] + + if ek[pos+keyLen] != zsetStartMemSep { + return nil, nil, errZSetKey + } + pos++ + + member := ek[pos+keyLen:] + return key, member, nil +} + +func (db *DB) zEncodeStartSetKey(key []byte) []byte { + k := db.zEncodeSetKey(key, nil) + return k +} + +func (db *DB) zEncodeStopSetKey(key []byte) []byte { + k := db.zEncodeSetKey(key, nil) + k[len(k)-1] = zsetStartMemSep + 1 + 
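+	// bumping the separator yields an exclusive upper bound covering
+	// every member key of this set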
return k +} + +func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte { + buf := make([]byte, len(key)+len(member)+13+len(db.indexVarBuf)) + + pos := copy(buf, db.indexVarBuf) + + buf[pos] = ZScoreType + pos++ + + binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) + pos += 2 + + copy(buf[pos:], key) + pos += len(key) + + if score < 0 { + buf[pos] = zsetNScoreSep + } else { + buf[pos] = zsetPScoreSep + } + + pos++ + binary.BigEndian.PutUint64(buf[pos:], uint64(score)) + pos += 8 + + buf[pos] = zsetStartMemSep + pos++ + + copy(buf[pos:], member) + return buf +} + +func (db *DB) zEncodeStartScoreKey(key []byte, score int64) []byte { + return db.zEncodeScoreKey(key, nil, score) +} + +func (db *DB) zEncodeStopScoreKey(key []byte, score int64) []byte { + k := db.zEncodeScoreKey(key, nil, score) + k[len(k)-1] = zsetStopMemSep + return k +} + +func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) { + pos := 0 + pos, err = db.checkKeyIndex(ek) + if err != nil { + return + } + + if pos+1 > len(ek) || ek[pos] != ZScoreType { + err = errZScoreKey + return + } + pos++ + + if pos+2 > len(ek) { + err = errZScoreKey + return + } + keyLen := int(binary.BigEndian.Uint16(ek[pos:])) + pos += 2 + + if keyLen+pos > len(ek) { + err = errZScoreKey + return + } + + key = ek[pos : pos+keyLen] + pos += keyLen + + if pos+10 > len(ek) { + err = errZScoreKey + return + } + + if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) { + err = errZScoreKey + return + } + pos++ + + score = int64(binary.BigEndian.Uint64(ek[pos:])) + pos += 8 + + if ek[pos] != zsetStartMemSep { + err = errZScoreKey + return + } + + pos++ + + member = ek[pos:] + return +} + +func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, error) { + if score <= MinScore || score >= MaxScore { + return 0, errScoreOverflow + } + + var exists int64 = 0 + ek := db.zEncodeSetKey(key, member) + + if v, err := db.bucket.Get(ek); err != nil { + return 0, err + } else if v != nil { + exists = 1 + + if s, err := Int64(v, err); err != nil { + return 0, err + } else { + sk := db.zEncodeScoreKey(key, member, s) + t.Delete(sk) + } + } + + t.Put(ek, PutInt64(score)) + + sk := db.zEncodeScoreKey(key, member, score) + t.Put(sk, []byte{}) + + return exists, nil +} + +func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) (int64, error) { + ek := db.zEncodeSetKey(key, member) + if v, err := db.bucket.Get(ek); err != nil { + return 0, err + } else if v == nil { + //not exists + return 0, nil + } else { + //exists + if !skipDelScore { + //we must del score + if s, err := Int64(v, err); err != nil { + return 0, err + } else { + sk := db.zEncodeScoreKey(key, member, s) + t.Delete(sk) + } + } + } + + t.Delete(ek) + + return 1, nil +} + +func (db *DB) zDelete(t *batch, key []byte) int64 { + delMembCnt, _ := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) + // todo : log err + return delMembCnt +} + +func (db *DB) zExpireAt(key []byte, when int64) (int64, error) { + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 { + return 0, err + } else { + db.expireAt(t, ZSetType, key, when) + if err := t.Commit(); err != nil { + return 0, err + } + } + return 1, nil +} + +func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { + if len(args) == 0 { + return 0, nil + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + var num int64 = 0 + for i := 0; i < len(args); i++ { + score := args[i].Score + 
member := args[i].Member + + if err := checkZSetKMSize(key, member); err != nil { + return 0, err + } + + if n, err := db.zSetItem(t, key, score, member); err != nil { + return 0, err + } else if n == 0 { + //add new + num++ + } + } + + if _, err := db.zIncrSize(t, key, num); err != nil { + return 0, err + } + + err := t.Commit() + return num, err +} + +func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) { + sk := db.zEncodeSizeKey(key) + + size, err := Int64(db.bucket.Get(sk)) + if err != nil { + return 0, err + } else { + size += delta + if size <= 0 { + size = 0 + t.Delete(sk) + db.rmExpire(t, ZSetType, key) + } else { + t.Put(sk, PutInt64(size)) + } + } + + return size, nil +} + +func (db *DB) ZCard(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + sk := db.zEncodeSizeKey(key) + return Int64(db.bucket.Get(sk)) +} + +func (db *DB) ZScore(key []byte, member []byte) (int64, error) { + if err := checkZSetKMSize(key, member); err != nil { + return InvalidScore, err + } + + var score int64 = InvalidScore + + k := db.zEncodeSetKey(key, member) + if v, err := db.bucket.Get(k); err != nil { + return InvalidScore, err + } else if v == nil { + return InvalidScore, ErrScoreMiss + } else { + if score, err = Int64(v, nil); err != nil { + return InvalidScore, err + } + } + + return score, nil +} + +func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { + if len(members) == 0 { + return 0, nil + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + var num int64 = 0 + for i := 0; i < len(members); i++ { + if err := checkZSetKMSize(key, members[i]); err != nil { + return 0, err + } + + if n, err := db.zDelItem(t, key, members[i], false); err != nil { + return 0, err + } else if n == 1 { + num++ + } + } + + if _, err := db.zIncrSize(t, key, -num); err != nil { + return 0, err + } + + err := t.Commit() + return num, err +} + +func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { + if err := checkZSetKMSize(key, member); err != nil { + return InvalidScore, err + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + ek := db.zEncodeSetKey(key, member) + + var oldScore int64 = 0 + v, err := db.bucket.Get(ek) + if err != nil { + return InvalidScore, err + } else if v == nil { + db.zIncrSize(t, key, 1) + } else { + if oldScore, err = Int64(v, err); err != nil { + return InvalidScore, err + } + } + + newScore := oldScore + delta + if newScore >= MaxScore || newScore <= MinScore { + return InvalidScore, errScoreOverflow + } + + sk := db.zEncodeScoreKey(key, member, newScore) + t.Put(sk, []byte{}) + t.Put(ek, PutInt64(newScore)) + + if v != nil { + // so as to update score, we must delete the old one + oldSk := db.zEncodeScoreKey(key, member, oldScore) + t.Delete(oldSk) + } + + err = t.Commit() + return newScore, err +} + +func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + minKey := db.zEncodeStartScoreKey(key, min) + maxKey := db.zEncodeStopScoreKey(key, max) + + rangeType := store.RangeROpen + + it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1) + var n int64 = 0 + for ; it.Valid(); it.Next() { + n++ + } + it.Close() + + return n, nil +} + +func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) { + if err := checkZSetKMSize(key, member); err != nil { + return 0, err + } + + k := db.zEncodeSetKey(key, member) + + it := db.bucket.NewIterator() + defer it.Close() + + if v := 
it.Find(k); v == nil {
+		return -1, nil
+	} else {
+		if s, err := Int64(v, nil); err != nil {
+			return 0, err
+		} else {
+			var rit *store.RangeLimitIterator
+
+			sk := db.zEncodeScoreKey(key, member, s)
+
+			if !reverse {
+				minKey := db.zEncodeStartScoreKey(key, MinScore)
+
+				rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose})
+			} else {
+				maxKey := db.zEncodeStopScoreKey(key, MaxScore)
+				rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose})
+			}
+
+			var lastKey []byte = nil
+			var n int64 = 0
+
+			for ; rit.Valid(); rit.Next() {
+				n++
+
+				lastKey = rit.BufKey(lastKey)
+			}
+
+			if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) {
+				n--
+				return n, nil
+			}
+		}
+	}
+
+	return -1, nil
+}
+
+func (db *DB) zIterator(key []byte, min int64, max int64, offset int, count int, reverse bool) *store.RangeLimitIterator {
+	minKey := db.zEncodeStartScoreKey(key, min)
+	maxKey := db.zEncodeStopScoreKey(key, max)
+
+	if !reverse {
+		return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
+	} else {
+		return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count)
+	}
+}
+
+func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) {
+	if len(key) > MaxKeySize {
+		return 0, errKeySize
+	}
+
+	it := db.zIterator(key, min, max, offset, count, false)
+	var num int64 = 0
+	for ; it.Valid(); it.Next() {
+		sk := it.RawKey()
+		_, m, _, err := db.zDecodeScoreKey(sk)
+		if err != nil {
+			continue
+		}
+
+		if n, err := db.zDelItem(t, key, m, true); err != nil {
+			return 0, err
+		} else if n == 1 {
+			num++
+		}
+
+		t.Delete(sk)
+	}
+	it.Close()
+
+	if _, err := db.zIncrSize(t, key, -num); err != nil {
+		return 0, err
+	}
+
+	return num, nil
+}
+
+func (db *DB) zRange(key []byte, min int64, max int64, offset int, count int, reverse bool) ([]ScorePair, error) {
+	if len(key) > MaxKeySize {
+		return nil, errKeySize
+	}
+
+	if offset < 0 {
+		return []ScorePair{}, nil
+	}
+
+	nv := count
+	// count may be very large, so cap the initial allocation for the make below.
+	if nv <= 0 || nv > 1024 {
+		nv = 64
+	}
+
+	v := make([]ScorePair, 0, nv)
+
+	var it *store.RangeLimitIterator
+
+	// If reverse with offset == 0 and count < 0, use the forward iterator and
+	// reverse the result afterwards: the store's Prev is slower than Next.
+	if !reverse || (offset == 0 && count < 0) {
+		it = db.zIterator(key, min, max, offset, count, false)
+	} else {
+		it = db.zIterator(key, min, max, offset, count, true)
+	}
+
+	for ; it.Valid(); it.Next() {
+		_, m, s, err := db.zDecodeScoreKey(it.Key())
+		// TODO: should we also verify that the decoded key matches the requested one?
+ if err != nil { + continue + } + + v = append(v, ScorePair{Member: m, Score: s}) + } + it.Close() + + if reverse && (offset == 0 && count < 0) { + for i, j := 0, len(v)-1; i < j; i, j = i+1, j-1 { + v[i], v[j] = v[j], v[i] + } + } + + return v, nil +} + +func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count int, err error) { + if start < 0 || stop < 0 { + //refer redis implementation + var size int64 + size, err = db.ZCard(key) + if err != nil { + return + } + + llen := int(size) + + if start < 0 { + start = llen + start + } + if stop < 0 { + stop = llen + stop + } + + if start < 0 { + start = 0 + } + + if start >= llen { + offset = -1 + return + } + } + + if start > stop { + offset = -1 + return + } + + offset = start + count = (stop - start) + 1 + return +} + +func (db *DB) ZClear(key []byte) (int64, error) { + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + rmCnt, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) + if err == nil { + err = t.Commit() + } + + return rmCnt, err +} + +func (db *DB) ZMclear(keys ...[]byte) (int64, error) { + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + for _, key := range keys { + if _, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1); err != nil { + return 0, err + } + } + + err := t.Commit() + + return int64(len(keys)), err +} + +func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) { + return db.ZRangeGeneric(key, start, stop, false) +} + +//min and max must be inclusive +//if no limit, set offset = 0 and count = -1 +func (db *DB) ZRangeByScore(key []byte, min int64, max int64, + offset int, count int) ([]ScorePair, error) { + return db.ZRangeByScoreGeneric(key, min, max, offset, count, false) +} + +func (db *DB) ZRank(key []byte, member []byte) (int64, error) { + return db.zrank(key, member, false) +} + +func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { + offset, count, err := db.zParseLimit(key, start, stop) + if err != nil { + return 0, err + } + + var rmCnt int64 + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + rmCnt, err = db.zRemRange(t, key, MinScore, MaxScore, offset, count) + if err == nil { + err = t.Commit() + } + + return rmCnt, err +} + +//min and max must be inclusive +func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) { + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + rmCnt, err := db.zRemRange(t, key, min, max, 0, -1) + if err == nil { + err = t.Commit() + } + + return rmCnt, err +} + +func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) { + return db.ZRangeGeneric(key, start, stop, true) +} + +func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) { + return db.zrank(key, member, true) +} + +//min and max must be inclusive +//if no limit, set offset = 0 and count = -1 +func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) { + return db.ZRangeByScoreGeneric(key, min, max, offset, count, true) +} + +func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) { + offset, count, err := db.zParseLimit(key, start, stop) + if err != nil { + return nil, err + } + + return db.zRange(key, MinScore, MaxScore, offset, count, reverse) +} + +//min and max must be inclusive +//if no limit, set offset = 0 and count = -1 +func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64, + offset int, count int, reverse bool) ([]ScorePair, error) { + + return db.zRange(key, min, max, offset, 
count, reverse) +} + +func (db *DB) zFlush() (drop int64, err error) { + t := db.zsetBatch + t.Lock() + defer t.Unlock() + return db.flushType(t, ZSetType) +} + +func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { + if duration <= 0 { + return 0, errExpireValue + } + + return db.zExpireAt(key, time.Now().Unix()+duration) +} + +func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { + if when <= time.Now().Unix() { + return 0, errExpireValue + } + + return db.zExpireAt(key, when) +} + +func (db *DB) ZTTL(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return -1, err + } + + return db.ttl(ZSetType, key) +} + +func (db *DB) ZPersist(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + n, err := db.rmExpire(t, ZSetType, key) + if err != nil { + return 0, err + } + + err = t.Commit() + return n, err +} + +func getAggregateFunc(aggregate byte) func(int64, int64) int64 { + switch aggregate { + case AggregateSum: + return func(a int64, b int64) int64 { + return a + b + } + case AggregateMax: + return func(a int64, b int64) int64 { + if a > b { + return a + } + return b + } + case AggregateMin: + return func(a int64, b int64) int64 { + if a > b { + return b + } + return a + } + } + return nil +} + +func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { + + var destMap = map[string]int64{} + aggregateFunc := getAggregateFunc(aggregate) + if aggregateFunc == nil { + return 0, errInvalidAggregate + } + if len(srcKeys) < 1 { + return 0, errInvalidSrcKeyNum + } + if weights != nil { + if len(srcKeys) != len(weights) { + return 0, errInvalidWeightNum + } + } else { + weights = make([]int64, len(srcKeys)) + for i := 0; i < len(weights); i++ { + weights[i] = 1 + } + } + + for i, key := range srcKeys { + scorePairs, err := db.ZRange(key, 0, -1) + if err != nil { + return 0, err + } + for _, pair := range scorePairs { + if score, ok := destMap[hack.String(pair.Member)]; !ok { + destMap[hack.String(pair.Member)] = pair.Score * weights[i] + } else { + destMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i]) + } + } + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + db.zDelete(t, destKey) + + for member, score := range destMap { + if err := checkZSetKMSize(destKey, []byte(member)); err != nil { + return 0, err + } + + if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { + return 0, err + } + } + + var n = int64(len(destMap)) + sk := db.zEncodeSizeKey(destKey) + t.Put(sk, PutInt64(n)) + + if err := t.Commit(); err != nil { + return 0, err + } + return n, nil +} + +func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { + + aggregateFunc := getAggregateFunc(aggregate) + if aggregateFunc == nil { + return 0, errInvalidAggregate + } + if len(srcKeys) < 1 { + return 0, errInvalidSrcKeyNum + } + if weights != nil { + if len(srcKeys) != len(weights) { + return 0, errInvalidWeightNum + } + } else { + weights = make([]int64, len(srcKeys)) + for i := 0; i < len(weights); i++ { + weights[i] = 1 + } + } + + var destMap = map[string]int64{} + scorePairs, err := db.ZRange(srcKeys[0], 0, -1) + if err != nil { + return 0, err + } + for _, pair := range scorePairs { + destMap[hack.String(pair.Member)] = pair.Score * weights[0] + } + + for i, key := range srcKeys[1:] { + scorePairs, err := db.ZRange(key, 0, -1) + if err != 
nil { + return 0, err + } + tmpMap := map[string]int64{} + for _, pair := range scorePairs { + if score, ok := destMap[hack.String(pair.Member)]; ok { + tmpMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1]) + } + } + destMap = tmpMap + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + db.zDelete(t, destKey) + + for member, score := range destMap { + if err := checkZSetKMSize(destKey, []byte(member)); err != nil { + return 0, err + } + if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { + return 0, err + } + } + + var n int64 = int64(len(destMap)) + sk := db.zEncodeSizeKey(destKey) + t.Put(sk, PutInt64(n)) + + if err := t.Commit(); err != nil { + return 0, err + } + return n, nil +} + +func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + it := db.bucket.RangeLimitIterator(min, max, rangeType, offset, count) + defer it.Close() + + ay := make([][]byte, 0, 16) + for ; it.Valid(); it.Next() { + if _, m, err := db.zDecodeSetKey(it.Key()); err == nil { + ay = append(ay, m) + } + } + + return ay, nil +} + +func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + t := db.zsetBatch + t.Lock() + defer t.Unlock() + + it := db.bucket.RangeIterator(min, max, rangeType) + defer it.Close() + + var n int64 = 0 + for ; it.Valid(); it.Next() { + t.Delete(it.RawKey()) + n++ + } + + if err := t.Commit(); err != nil { + return 0, err + } + + return n, nil +} + +func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { + if min == nil { + min = db.zEncodeStartSetKey(key) + } else { + min = db.zEncodeSetKey(key, min) + } + if max == nil { + max = db.zEncodeStopSetKey(key) + } else { + max = db.zEncodeSetKey(key, max) + } + + it := db.bucket.RangeIterator(min, max, rangeType) + defer it.Close() + + var n int64 = 0 + for ; it.Valid(); it.Next() { + n++ + } + + return n, nil +} + +func (db *DB) ZKeyExists(key []byte) (int64, error) { + if err := checkKeySize(key); err != nil { + return 0, err + } + sk := db.zEncodeSizeKey(key) + v, err := db.bucket.Get(sk) + if v != nil && err == nil { + return 1, nil + } + return 0, err +} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/util.go b/vendor/github.com/siddontang/ledisdb/ledis/util.go new file mode 100644 index 000000000000..26ee6d08d87b --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/ledis/util.go @@ -0,0 +1,95 @@ +package ledis + +import ( + "encoding/binary" + "errors" + "strconv" + + "github.com/siddontang/go/hack" +) + +var errIntNumber = errors.New("invalid integer") + +/* + Below I forget why I use little endian to store int. + Maybe I was foolish at that time. 
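+
+	In practice the rule is simply: values written with PutInt64 are little
+	endian and must be read back with Int64/Uint64, while the key encodings
+	elsewhere in ledisdb are generally big endian. A tiny round-trip sketch
+	(illustrative only, not part of the upstream code):
+
+		b := PutInt64(42)        // 8-byte little-endian value
+		v, _ := Int64(b, nil)    // v == 42; the input must be exactly 8 bytes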
+*/
+
+func Int64(v []byte, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil || len(v) == 0 {
+		return 0, nil
+	} else if len(v) != 8 {
+		return 0, errIntNumber
+	}
+
+	return int64(binary.LittleEndian.Uint64(v)), nil
+}
+
+func Uint64(v []byte, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil || len(v) == 0 {
+		return 0, nil
+	} else if len(v) != 8 {
+		return 0, errIntNumber
+	}
+
+	return binary.LittleEndian.Uint64(v), nil
+}
+
+func PutInt64(v int64) []byte {
+	b := make([]byte, 8)
+	binary.LittleEndian.PutUint64(b, uint64(v))
+	return b
+}
+
+func StrInt64(v []byte, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil {
+		return 0, nil
+	} else {
+		return strconv.ParseInt(hack.String(v), 10, 64)
+	}
+}
+
+func StrUint64(v []byte, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil {
+		return 0, nil
+	} else {
+		return strconv.ParseUint(hack.String(v), 10, 64)
+	}
+}
+
+func StrInt32(v []byte, err error) (int32, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil {
+		return 0, nil
+	} else {
+		res, err := strconv.ParseInt(hack.String(v), 10, 32)
+		return int32(res), err
+	}
+}
+
+func StrInt8(v []byte, err error) (int8, error) {
+	if err != nil {
+		return 0, err
+	} else if v == nil {
+		return 0, nil
+	} else {
+		res, err := strconv.ParseInt(hack.String(v), 10, 8)
+		return int8(res), err
+	}
+}
+
+func AsyncNotify(ch chan struct{}) {
+	select {
+	case ch <- struct{}{}:
+	default:
+	}
+}
diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_io.go b/vendor/github.com/siddontang/ledisdb/rpl/file_io.go
new file mode 100644
index 000000000000..6eac87890907
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/rpl/file_io.go
@@ -0,0 +1,363 @@
+package rpl
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/edsrzf/mmap-go"
+	"github.com/siddontang/go/log"
+)
+
+// File abstractions modeled on the leveldb/rocksdb file interfaces.
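+//
+// A minimal usage sketch (illustrative only; it relies on the newWriteFile
+// and newRawReadFile constructors defined later in this file):
+//
+//	wf, _ := newWriteFile(false, "00000001.data", 0)
+//	wf.Write([]byte("log1"))
+//	wf.Sync()
+//	wf.Close() // truncates the file to the written size
+//
+//	rf, _ := newRawReadFile("00000001.data")
+//	buf := make([]byte, 4)
+//	rf.ReadAt(buf, 0) // buf now holds "log1"
+//	rf.Close()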
+ +type writeFile interface { + Sync() error + Write(b []byte) (n int, err error) + Close() error + ReadAt(buf []byte, offset int64) (int, error) + Truncate(size int64) error + SetOffset(o int64) + Name() string + Size() int + Offset() int64 +} + +type readFile interface { + ReadAt(buf []byte, offset int64) (int, error) + Close() error + Size() int + Name() string +} + +type rawWriteFile struct { + writeFile + f *os.File + offset int64 + name string +} + +func newRawWriteFile(name string, size int64) (writeFile, error) { + m := new(rawWriteFile) + var err error + + m.name = name + + m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + + return m, nil +} + +func (m *rawWriteFile) Close() error { + if err := m.f.Truncate(m.offset); err != nil { + return fmt.Errorf("close truncate %s error %s", m.name, err.Error()) + } + + if err := m.f.Close(); err != nil { + return fmt.Errorf("close %s error %s", m.name, err.Error()) + } + + return nil +} + +func (m *rawWriteFile) Sync() error { + return m.f.Sync() +} + +func (m *rawWriteFile) Write(b []byte) (n int, err error) { + n, err = m.f.WriteAt(b, m.offset) + if err != nil { + return + } else if n != len(b) { + err = io.ErrShortWrite + return + } + + m.offset += int64(n) + return +} + +func (m *rawWriteFile) ReadAt(buf []byte, offset int64) (int, error) { + return m.f.ReadAt(buf, offset) +} + +func (m *rawWriteFile) Truncate(size int64) error { + var err error + if err = m.f.Truncate(size); err != nil { + return err + } + + if m.offset > size { + m.offset = size + } + return nil +} + +func (m *rawWriteFile) SetOffset(o int64) { + m.offset = o +} + +func (m *rawWriteFile) Offset() int64 { + return m.offset +} + +func (m *rawWriteFile) Name() string { + return m.name +} + +func (m *rawWriteFile) Size() int { + st, _ := m.f.Stat() + return int(st.Size()) +} + +type rawReadFile struct { + readFile + + f *os.File + name string +} + +func newRawReadFile(name string) (readFile, error) { + m := new(rawReadFile) + + var err error + m.f, err = os.Open(name) + m.name = name + + if err != nil { + return nil, err + } + + return m, err +} + +func (m *rawReadFile) Close() error { + return m.f.Close() +} + +func (m *rawReadFile) Size() int { + st, _ := m.f.Stat() + return int(st.Size()) +} + +func (m *rawReadFile) ReadAt(b []byte, offset int64) (int, error) { + return m.f.ReadAt(b, offset) +} + +func (m *rawReadFile) Name() string { + return m.name +} + +///////////////////////////////////////////////// + +type mmapWriteFile struct { + writeFile + + f *os.File + m mmap.MMap + name string + size int64 + offset int64 +} + +func newMmapWriteFile(name string, size int64) (writeFile, error) { + m := new(mmapWriteFile) + + m.name = name + + var err error + + m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + + if size == 0 { + st, _ := m.f.Stat() + size = st.Size() + } + + if err = m.f.Truncate(size); err != nil { + return nil, err + } + + if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil { + return nil, err + } + + m.size = size + m.offset = 0 + return m, nil +} + +func (m *mmapWriteFile) Size() int { + return int(m.size) +} + +func (m *mmapWriteFile) Sync() error { + return m.m.Flush() +} + +func (m *mmapWriteFile) Close() error { + if err := m.m.Unmap(); err != nil { + return fmt.Errorf("unmap %s error %s", m.name, err.Error()) + } + + if err := m.f.Truncate(m.offset); err != nil { + return fmt.Errorf("close truncate %s error %s", m.name, err.Error()) + } + + if err := 
m.f.Close(); err != nil { + return fmt.Errorf("close %s error %s", m.name, err.Error()) + } + + return nil +} + +func (m *mmapWriteFile) Write(b []byte) (n int, err error) { + extra := int64(len(b)) - (m.size - m.offset) + if extra > 0 { + newSize := m.size + extra + m.size/10 + if err = m.Truncate(newSize); err != nil { + return + } + m.size = newSize + } + + n = copy(m.m[m.offset:], b) + if n != len(b) { + return 0, io.ErrShortWrite + } + + m.offset += int64(len(b)) + return len(b), nil +} + +func (m *mmapWriteFile) ReadAt(buf []byte, offset int64) (int, error) { + if offset > m.offset { + return 0, fmt.Errorf("invalid offset %d", offset) + } + + n := copy(buf, m.m[offset:m.offset]) + if n != len(buf) { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (m *mmapWriteFile) Truncate(size int64) error { + var err error + if err = m.m.Unmap(); err != nil { + return err + } + + if err = m.f.Truncate(size); err != nil { + return err + } + + if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil { + return err + } + + m.size = size + if m.offset > m.size { + m.offset = m.size + } + return nil +} + +func (m *mmapWriteFile) SetOffset(o int64) { + m.offset = o +} + +func (m *mmapWriteFile) Offset() int64 { + return m.offset +} + +func (m *mmapWriteFile) Name() string { + return m.name +} + +type mmapReadFile struct { + readFile + + f *os.File + m mmap.MMap + name string +} + +func newMmapReadFile(name string) (readFile, error) { + m := new(mmapReadFile) + + m.name = name + + var err error + m.f, err = os.Open(name) + if err != nil { + return nil, err + } + + m.m, err = mmap.Map(m.f, mmap.RDONLY, 0) + return m, err +} + +func (m *mmapReadFile) ReadAt(buf []byte, offset int64) (int, error) { + if int64(offset) > int64(len(m.m)) { + return 0, fmt.Errorf("invalid offset %d", offset) + } + + n := copy(buf, m.m[offset:]) + if n != len(buf) { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (m *mmapReadFile) Close() error { + if m.m != nil { + if err := m.m.Unmap(); err != nil { + log.Errorf("unmap %s error %s", m.name, err.Error()) + } + m.m = nil + } + + if m.f != nil { + if err := m.f.Close(); err != nil { + log.Errorf("close %s error %s", m.name, err.Error()) + } + m.f = nil + } + + return nil +} + +func (m *mmapReadFile) Size() int { + return len(m.m) +} + +func (m *mmapReadFile) Name() string { + return m.name +} + +///////////////////////////////////// + +func newWriteFile(useMmap bool, name string, size int64) (writeFile, error) { + if useMmap { + return newMmapWriteFile(name, size) + } else { + return newRawWriteFile(name, size) + } +} + +func newReadFile(useMmap bool, name string) (readFile, error) { + if useMmap { + return newMmapReadFile(name) + } else { + return newRawReadFile(name) + } +} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_store.go b/vendor/github.com/siddontang/ledisdb/rpl/file_store.go new file mode 100644 index 000000000000..f6f708b7c60f --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/rpl/file_store.go @@ -0,0 +1,416 @@ +package rpl + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "sync" + "time" + + "github.com/siddontang/go/log" + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/config" +) + +const ( + defaultMaxLogFileSize = int64(256 * 1024 * 1024) + + maxLogFileSize = int64(1024 * 1024 * 1024) + + defaultLogNumInFile = int64(1024 * 1024) +) + +/* + File Store: + 00000001.data + 00000001.meta + 00000002.data + 00000002.meta + + data: log1 data | log2 data | magic data + + if data has no magic data, it 
means that we don't close replication gracefully. + so we must repair the log data + log data: id (bigendian uint64), create time (bigendian uint32), compression (byte), data len(bigendian uint32), data + split data = log0 data + [padding 0] -> file % pagesize() == 0 + + meta: log1 offset | log2 offset + log offset: bigendian uint32 | bigendian uint32 + + //sha1 of github.com/siddontang/ledisdb 20 bytes + magic data = "\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17" + + we must guarantee that the log id is monotonic increment strictly. + if log1's id is 1, log2 must be 2 +*/ + +type FileStore struct { + LogStore + + cfg *config.Config + + base string + + rm sync.RWMutex + wm sync.Mutex + + rs tableReaders + w *tableWriter + + quit chan struct{} +} + +func NewFileStore(base string, cfg *config.Config) (*FileStore, error) { + s := new(FileStore) + + s.quit = make(chan struct{}) + + var err error + + if err = os.MkdirAll(base, 0755); err != nil { + return nil, err + } + + s.base = base + + if cfg.Replication.MaxLogFileSize == 0 { + cfg.Replication.MaxLogFileSize = defaultMaxLogFileSize + } + + cfg.Replication.MaxLogFileSize = num.MinInt64(cfg.Replication.MaxLogFileSize, maxLogFileSize) + + s.cfg = cfg + + if err = s.load(); err != nil { + return nil, err + } + + index := int64(1) + if len(s.rs) != 0 { + index = s.rs[len(s.rs)-1].index + 1 + } + + s.w = newTableWriter(s.base, index, cfg.Replication.MaxLogFileSize, cfg.Replication.UseMmap) + s.w.SetSyncType(cfg.Replication.SyncLog) + + go s.checkTableReaders() + + return s, nil +} + +func (s *FileStore) GetLog(id uint64, l *Log) error { + //first search in table writer + if err := s.w.GetLog(id, l); err == nil { + return nil + } else if err != ErrLogNotFound { + return err + } + + s.rm.RLock() + t := s.rs.Search(id) + + if t == nil { + s.rm.RUnlock() + + return ErrLogNotFound + } + + err := t.GetLog(id, l) + s.rm.RUnlock() + + return err +} + +func (s *FileStore) FirstID() (uint64, error) { + id := uint64(0) + + s.rm.RLock() + if len(s.rs) > 0 { + id = s.rs[0].first + } else { + id = 0 + } + s.rm.RUnlock() + + if id > 0 { + return id, nil + } + + //if id = 0, + + return s.w.First(), nil +} + +func (s *FileStore) LastID() (uint64, error) { + id := s.w.Last() + if id > 0 { + return id, nil + } + + //if table writer has no last id, we may find in the last table reader + + s.rm.RLock() + if len(s.rs) > 0 { + id = s.rs[len(s.rs)-1].last + } + s.rm.RUnlock() + + return id, nil +} + +func (s *FileStore) StoreLog(l *Log) error { + s.wm.Lock() + err := s.storeLog(l) + s.wm.Unlock() + return err +} + +func (s *FileStore) storeLog(l *Log) error { + err := s.w.StoreLog(l) + if err == nil { + return nil + } else if err != errTableNeedFlush { + return err + } + + var r *tableReader + r, err = s.w.Flush() + + if err != nil { + log.Fatalf("write table flush error %s, can not store!!!", err.Error()) + + s.w.Close() + + return err + } + + s.rm.Lock() + s.rs = append(s.rs, r) + s.rm.Unlock() + + err = s.w.StoreLog(l) + + return err +} + +func (s *FileStore) PurgeExpired(n int64) error { + s.rm.Lock() + + var purges []*tableReader + + t := uint32(time.Now().Unix() - int64(n)) + + for i, r := range s.rs { + if r.lastTime > t { + purges = append([]*tableReader{}, s.rs[0:i]...) 
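+			// everything before index i has expired: those readers were just
+			// handed to purges, and the copy below compacts s.rs to the live tail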
+ n := copy(s.rs, s.rs[i:]) + s.rs = s.rs[0:n] + break + } + } + + s.rm.Unlock() + + s.purgeTableReaders(purges) + + return nil +} + +func (s *FileStore) Sync() error { + return s.w.Sync() +} + +func (s *FileStore) Clear() error { + s.wm.Lock() + s.rm.Lock() + + defer func() { + s.rm.Unlock() + s.wm.Unlock() + }() + + s.w.Close() + + for i := range s.rs { + s.rs[i].Close() + } + + s.rs = tableReaders{} + + if err := os.RemoveAll(s.base); err != nil { + return err + } + + if err := os.MkdirAll(s.base, 0755); err != nil { + return err + } + + s.w = newTableWriter(s.base, 1, s.cfg.Replication.MaxLogFileSize, s.cfg.Replication.UseMmap) + + return nil +} + +func (s *FileStore) Close() error { + close(s.quit) + + s.wm.Lock() + s.rm.Lock() + + if r, err := s.w.Flush(); err != nil { + if err != errNilHandler { + log.Errorf("close err: %s", err.Error()) + } + } else { + r.Close() + s.w.Close() + } + + for i := range s.rs { + s.rs[i].Close() + } + + s.rs = tableReaders{} + + s.rm.Unlock() + s.wm.Unlock() + + return nil +} + +func (s *FileStore) checkTableReaders() { + t := time.NewTicker(60 * time.Second) + defer t.Stop() + for { + select { + case <-t.C: + s.rm.Lock() + + for _, r := range s.rs { + if !r.Keepalived() { + r.Close() + } + } + + purges := []*tableReader{} + maxNum := s.cfg.Replication.MaxLogFileNum + num := len(s.rs) + if num > maxNum { + purges = s.rs[:num-maxNum] + s.rs = s.rs[num-maxNum:] + } + + s.rm.Unlock() + + s.purgeTableReaders(purges) + + case <-s.quit: + return + } + } +} + +func (s *FileStore) purgeTableReaders(purges []*tableReader) { + for _, r := range purges { + dataName := fmtTableDataName(r.base, r.index) + metaName := fmtTableMetaName(r.base, r.index) + r.Close() + if err := os.Remove(dataName); err != nil { + log.Errorf("purge table data %s err: %s", dataName, err.Error()) + } + if err := os.Remove(metaName); err != nil { + log.Errorf("purge table meta %s err: %s", metaName, err.Error()) + } + + } +} + +func (s *FileStore) load() error { + fs, err := ioutil.ReadDir(s.base) + if err != nil { + return err + } + + s.rs = make(tableReaders, 0, len(fs)) + + var r *tableReader + var index int64 + for _, f := range fs { + if _, err := fmt.Sscanf(f.Name(), "%08d.data", &index); err == nil { + if r, err = newTableReader(s.base, index, s.cfg.Replication.UseMmap); err != nil { + log.Errorf("load table %s err: %s", f.Name(), err.Error()) + } else { + s.rs = append(s.rs, r) + } + } + } + + if err := s.rs.check(); err != nil { + return err + } + + return nil +} + +type tableReaders []*tableReader + +func (ts tableReaders) Len() int { + return len(ts) +} + +func (ts tableReaders) Swap(i, j int) { + ts[i], ts[j] = ts[j], ts[i] +} + +func (ts tableReaders) Less(i, j int) bool { + return ts[i].first < ts[j].first +} + +func (ts tableReaders) Search(id uint64) *tableReader { + i, j := 0, len(ts)-1 + + for i <= j { + h := i + (j-i)/2 + + if ts[h].first <= id && id <= ts[h].last { + return ts[h] + } else if ts[h].last < id { + i = h + 1 + } else { + j = h - 1 + } + } + + return nil +} + +func (ts tableReaders) check() error { + if len(ts) == 0 { + return nil + } + + sort.Sort(ts) + + first := ts[0].first + last := ts[0].last + index := ts[0].index + + if first == 0 || first > last { + return fmt.Errorf("invalid log in table %s", ts[0]) + } + + for i := 1; i < len(ts); i++ { + if ts[i].first <= last { + return fmt.Errorf("invalid first log id %d in table %s", ts[i].first, ts[i]) + } + + if ts[i].index <= index { + return fmt.Errorf("invalid index %d in table %s", ts[i].index, ts[i]) + } 
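+		// both checks above keep the tables strictly ordered and non-overlapping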
+ + first = ts[i].first + last = ts[i].last + index = ts[i].index + } + return nil +} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_table.go b/vendor/github.com/siddontang/ledisdb/rpl/file_table.go new file mode 100644 index 000000000000..9658102ff65c --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/rpl/file_table.go @@ -0,0 +1,571 @@ +package rpl + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "path" + "sync" + "time" + + "github.com/siddontang/go/log" + "github.com/siddontang/go/sync2" +) + +var ( + magic = []byte("\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17") + errTableNeedFlush = errors.New("write table need flush") + errNilHandler = errors.New("nil write handler") +) + +const tableReaderKeepaliveInterval int64 = 30 + +func fmtTableDataName(base string, index int64) string { + return path.Join(base, fmt.Sprintf("%08d.data", index)) +} + +func fmtTableMetaName(base string, index int64) string { + return path.Join(base, fmt.Sprintf("%08d.meta", index)) +} + +type tableReader struct { + sync.Mutex + + base string + index int64 + + data readFile + meta readFile + + first uint64 + last uint64 + + lastTime uint32 + + lastReadTime sync2.AtomicInt64 + + useMmap bool +} + +func newTableReader(base string, index int64, useMmap bool) (*tableReader, error) { + if index <= 0 { + return nil, fmt.Errorf("invalid index %d", index) + } + t := new(tableReader) + t.base = base + t.index = index + + t.useMmap = useMmap + + var err error + + if err = t.check(); err != nil { + log.Errorf("check %d error: %s, try to repair", t.index, err.Error()) + + if err = t.repair(); err != nil { + log.Errorf("repair %d error: %s", t.index, err.Error()) + return nil, err + } + } + + t.close() + + return t, nil +} + +func (t *tableReader) String() string { + return fmt.Sprintf("%d", t.index) +} + +func (t *tableReader) Close() { + t.Lock() + + t.close() + + t.Unlock() +} + +func (t *tableReader) close() { + if t.data != nil { + t.data.Close() + t.data = nil + } + + if t.meta != nil { + t.meta.Close() + t.meta = nil + } +} + +func (t *tableReader) Keepalived() bool { + l := t.lastReadTime.Get() + if l > 0 && time.Now().Unix()-l > tableReaderKeepaliveInterval { + return false + } + + return true +} + +func (t *tableReader) getLogPos(index int) (uint32, error) { + var buf [4]byte + if _, err := t.meta.ReadAt(buf[0:4], int64(index)*4); err != nil { + return 0, err + } + + return binary.BigEndian.Uint32(buf[0:4]), nil +} + +func (t *tableReader) checkData() error { + var err error + //check will use raw file mode + if t.data, err = newReadFile(false, fmtTableDataName(t.base, t.index)); err != nil { + return err + } + + if t.data.Size() < len(magic) { + return fmt.Errorf("data file %s size %d too short", t.data.Name(), t.data.Size()) + } + + buf := make([]byte, len(magic)) + if _, err := t.data.ReadAt(buf, int64(t.data.Size()-len(magic))); err != nil { + return err + } + + if !bytes.Equal(magic, buf) { + return fmt.Errorf("data file %s invalid magic data %q", t.data.Name(), buf) + } + + return nil +} + +func (t *tableReader) checkMeta() error { + var err error + //check will use raw file mode + if t.meta, err = newReadFile(false, fmtTableMetaName(t.base, t.index)); err != nil { + return err + } + + if t.meta.Size()%4 != 0 || t.meta.Size() == 0 { + return fmt.Errorf("meta file %s invalid offset len %d, must 4 multiple and not 0", t.meta.Name(), t.meta.Size()) + } + + return nil +} + +func (t *tableReader) check() error { + var err error + + if err := 
t.checkMeta(); err != nil { + return err + } + + if err := t.checkData(); err != nil { + return err + } + + firstLogPos, _ := t.getLogPos(0) + lastLogPos, _ := t.getLogPos(t.meta.Size()/4 - 1) + + if firstLogPos != 0 { + return fmt.Errorf("invalid first log pos %d, must 0", firstLogPos) + } + + var l Log + if _, err = t.decodeLogHead(&l, t.data, int64(firstLogPos)); err != nil { + return fmt.Errorf("decode first log err %s", err.Error()) + } + + t.first = l.ID + var n int64 + if n, err = t.decodeLogHead(&l, t.data, int64(lastLogPos)); err != nil { + return fmt.Errorf("decode last log err %s", err.Error()) + } else if n+int64(len(magic)) != int64(t.data.Size()) { + return fmt.Errorf("extra log data at offset %d", n) + } + + t.last = l.ID + t.lastTime = l.CreateTime + + if t.first > t.last { + return fmt.Errorf("invalid log table first %d > last %d", t.first, t.last) + } else if (t.last - t.first + 1) != uint64(t.meta.Size()/4) { + return fmt.Errorf("invalid log table, first %d, last %d, and log num %d", t.first, t.last, t.meta.Size()/4) + } + + return nil +} + +func (t *tableReader) repair() error { + t.close() + + var err error + var data writeFile + var meta writeFile + + //repair will use raw file mode + data, err = newWriteFile(false, fmtTableDataName(t.base, t.index), 0) + data.SetOffset(int64(data.Size())) + + meta, err = newWriteFile(false, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)) + + var l Log + var pos int64 = 0 + var nextPos int64 = 0 + b := make([]byte, 4) + + t.first = 0 + t.last = 0 + + for { + nextPos, err = t.decodeLogHead(&l, data, pos) + if err != nil { + //if error, we may lost all logs from pos + log.Errorf("%s may lost logs from %d", data.Name(), pos) + break + } + + if l.ID == 0 { + log.Errorf("%s may lost logs from %d, invalid log 0", data.Name(), pos) + break + } + + if t.first == 0 { + t.first = l.ID + } + + if t.last == 0 { + t.last = l.ID + } else if l.ID <= t.last { + log.Errorf("%s may lost logs from %d, invalid logid %d", t.data.Name(), pos, l.ID) + break + } + + t.last = l.ID + t.lastTime = l.CreateTime + + binary.BigEndian.PutUint32(b, uint32(pos)) + meta.Write(b) + + pos = nextPos + + t.lastTime = l.CreateTime + } + + var e error + if err := meta.Close(); err != nil { + e = err + } + + data.SetOffset(pos) + + if _, err = data.Write(magic); err != nil { + log.Errorf("write magic error %s", err.Error()) + } + + if err = data.Close(); err != nil { + return err + } + + return e +} + +func (t *tableReader) decodeLogHead(l *Log, r io.ReaderAt, pos int64) (int64, error) { + dataLen, err := l.DecodeHeadAt(r, pos) + if err != nil { + return 0, err + } + + return pos + int64(l.HeadSize()) + int64(dataLen), nil +} + +func (t *tableReader) GetLog(id uint64, l *Log) error { + if id < t.first || id > t.last { + return ErrLogNotFound + } + + t.lastReadTime.Set(time.Now().Unix()) + + t.Lock() + + if err := t.openTable(); err != nil { + t.close() + t.Unlock() + return err + } + t.Unlock() + + pos, err := t.getLogPos(int(id - t.first)) + if err != nil { + return err + } + + if err := l.DecodeAt(t.data, int64(pos)); err != nil { + return err + } else if l.ID != id { + return fmt.Errorf("invalid log id %d != %d", l.ID, id) + } + + return nil +} + +func (t *tableReader) openTable() error { + var err error + if t.data == nil { + if t.data, err = newReadFile(t.useMmap, fmtTableDataName(t.base, t.index)); err != nil { + return err + } + } + + if t.meta == nil { + if t.meta, err = newReadFile(t.useMmap, fmtTableMetaName(t.base, t.index)); err != nil { + return 
err + } + + } + + return nil +} + +type tableWriter struct { + sync.RWMutex + + data writeFile + meta writeFile + + base string + index int64 + + first uint64 + last uint64 + lastTime uint32 + + maxLogSize int64 + + closed bool + + syncType int + + posBuf []byte + + useMmap bool +} + +func newTableWriter(base string, index int64, maxLogSize int64, useMmap bool) *tableWriter { + if index <= 0 { + panic(fmt.Errorf("invalid index %d", index)) + } + + t := new(tableWriter) + + t.base = base + t.index = index + + t.maxLogSize = maxLogSize + + t.closed = false + + t.posBuf = make([]byte, 4) + + t.useMmap = useMmap + + return t +} + +func (t *tableWriter) String() string { + return fmt.Sprintf("%d", t.index) +} + +func (t *tableWriter) SetMaxLogSize(s int64) { + t.maxLogSize = s +} + +func (t *tableWriter) SetSyncType(tp int) { + t.syncType = tp +} + +func (t *tableWriter) close() { + if t.meta != nil { + if err := t.meta.Close(); err != nil { + log.Fatalf("close log meta error %s", err.Error()) + } + t.meta = nil + } + + if t.data != nil { + if _, err := t.data.Write(magic); err != nil { + log.Fatalf("write magic error %s", err.Error()) + } + + if err := t.data.Close(); err != nil { + log.Fatalf("close log data error %s", err.Error()) + } + t.data = nil + } +} + +func (t *tableWriter) Close() { + t.Lock() + t.closed = true + + t.close() + t.Unlock() +} + +func (t *tableWriter) First() uint64 { + t.Lock() + id := t.first + t.Unlock() + return id +} + +func (t *tableWriter) Last() uint64 { + t.Lock() + id := t.last + t.Unlock() + return id +} + +func (t *tableWriter) Flush() (*tableReader, error) { + t.Lock() + + if t.data == nil || t.meta == nil { + t.Unlock() + return nil, errNilHandler + } + + tr := new(tableReader) + tr.base = t.base + tr.index = t.index + + tr.first = t.first + tr.last = t.last + tr.lastTime = t.lastTime + tr.useMmap = t.useMmap + + t.close() + + t.first = 0 + t.last = 0 + t.index = t.index + 1 + + t.Unlock() + + return tr, nil +} + +func (t *tableWriter) StoreLog(l *Log) error { + t.Lock() + err := t.storeLog(l) + t.Unlock() + + return err +} + +func (t *tableWriter) openFile() error { + var err error + if t.data == nil { + if t.data, err = newWriteFile(t.useMmap, fmtTableDataName(t.base, t.index), t.maxLogSize+t.maxLogSize/10+int64(len(magic))); err != nil { + return err + } + } + + if t.meta == nil { + if t.meta, err = newWriteFile(t.useMmap, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)); err != nil { + return err + } + } + return err +} + +func (t *tableWriter) storeLog(l *Log) error { + if l.ID == 0 { + return ErrStoreLogID + } + + if t.closed { + return fmt.Errorf("table writer is closed") + } + + if t.last > 0 && l.ID != t.last+1 { + return ErrStoreLogID + } + + if t.data != nil && t.data.Offset() > t.maxLogSize { + return errTableNeedFlush + } + + var err error + if err = t.openFile(); err != nil { + return err + } + + offsetPos := t.data.Offset() + if err = l.Encode(t.data); err != nil { + return err + } + + binary.BigEndian.PutUint32(t.posBuf, uint32(offsetPos)) + if _, err = t.meta.Write(t.posBuf); err != nil { + return err + } + + if t.first == 0 { + t.first = l.ID + } + + t.last = l.ID + t.lastTime = l.CreateTime + + if t.syncType == 2 { + if err := t.data.Sync(); err != nil { + log.Errorf("sync table error %s", err.Error()) + } + } + + return nil +} + +func (t *tableWriter) GetLog(id uint64, l *Log) error { + t.RLock() + defer t.RUnlock() + + if id < t.first || id > t.last { + return ErrLogNotFound + } + + var buf [4]byte + if _, err := 
t.meta.ReadAt(buf[0:4], int64((id-t.first)*4)); err != nil { + return err + } + + offset := binary.BigEndian.Uint32(buf[0:4]) + + if err := l.DecodeAt(t.data, int64(offset)); err != nil { + return err + } else if l.ID != id { + return fmt.Errorf("invalid log id %d != %d", id, l.ID) + } + + return nil +} + +func (t *tableWriter) Sync() error { + t.Lock() + + var err error + if t.data != nil { + err = t.data.Sync() + t.Unlock() + return err + } + + if t.meta != nil { + err = t.meta.Sync() + } + + t.Unlock() + + return err +} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go b/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go new file mode 100644 index 000000000000..445c17cfcf68 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go @@ -0,0 +1,225 @@ +package rpl + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" + + "github.com/siddontang/go/num" + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store" +) + +type GoLevelDBStore struct { + LogStore + + m sync.Mutex + db *store.DB + + cfg *config.Config + + first uint64 + last uint64 + + buf bytes.Buffer +} + +func (s *GoLevelDBStore) FirstID() (uint64, error) { + s.m.Lock() + id, err := s.firstID() + s.m.Unlock() + + return id, err +} + +func (s *GoLevelDBStore) LastID() (uint64, error) { + s.m.Lock() + id, err := s.lastID() + s.m.Unlock() + + return id, err +} + +func (s *GoLevelDBStore) firstID() (uint64, error) { + if s.first != InvalidLogID { + return s.first, nil + } + + it := s.db.NewIterator() + defer it.Close() + + it.SeekToFirst() + + if it.Valid() { + s.first = num.BytesToUint64(it.RawKey()) + } + + return s.first, nil +} + +func (s *GoLevelDBStore) lastID() (uint64, error) { + if s.last != InvalidLogID { + return s.last, nil + } + + it := s.db.NewIterator() + defer it.Close() + + it.SeekToLast() + + if it.Valid() { + s.last = num.BytesToUint64(it.RawKey()) + } + + return s.last, nil +} + +func (s *GoLevelDBStore) GetLog(id uint64, log *Log) error { + v, err := s.db.Get(num.Uint64ToBytes(id)) + if err != nil { + return err + } else if v == nil { + return ErrLogNotFound + } else { + return log.Decode(bytes.NewBuffer(v)) + } +} + +func (s *GoLevelDBStore) StoreLog(log *Log) error { + s.m.Lock() + defer s.m.Unlock() + + last, err := s.lastID() + if err != nil { + return err + } + + s.last = InvalidLogID + + s.buf.Reset() + + if log.ID != last+1 { + return ErrStoreLogID + } + + last = log.ID + key := num.Uint64ToBytes(log.ID) + + if err := log.Encode(&s.buf); err != nil { + return err + } + + if err = s.db.Put(key, s.buf.Bytes()); err != nil { + return err + } + + s.last = last + return nil +} + +func (s *GoLevelDBStore) PurgeExpired(n int64) error { + if n <= 0 { + return fmt.Errorf("invalid expired time %d", n) + } + + t := uint32(time.Now().Unix() - int64(n)) + + s.m.Lock() + defer s.m.Unlock() + + s.reset() + + it := s.db.NewIterator() + it.SeekToFirst() + + w := s.db.NewWriteBatch() + defer w.Rollback() + + l := new(Log) + for ; it.Valid(); it.Next() { + v := it.RawValue() + + if err := l.Unmarshal(v); err != nil { + return err + } else if l.CreateTime > t { + break + } else { + w.Delete(it.RawKey()) + } + } + + if err := w.Commit(); err != nil { + return err + } + + return nil +} + +func (s *GoLevelDBStore) Sync() error { + //no other way for sync, so ignore here + return nil +} + +func (s *GoLevelDBStore) reset() { + s.first = InvalidLogID + s.last = InvalidLogID +} + +func (s *GoLevelDBStore) Clear() error { + s.m.Lock() + defer s.m.Unlock() + 
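+	// close the current handle, reset the cached first/last ids, remove the
+	// on-disk files, then reopen an empty store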
+ if s.db != nil { + s.db.Close() + } + + s.reset() + os.RemoveAll(s.cfg.DBPath) + + return s.open() +} + +func (s *GoLevelDBStore) Close() error { + s.m.Lock() + defer s.m.Unlock() + + if s.db == nil { + return nil + } + + err := s.db.Close() + s.db = nil + return err +} + +func (s *GoLevelDBStore) open() error { + var err error + + s.first = InvalidLogID + s.last = InvalidLogID + + s.db, err = store.Open(s.cfg) + return err +} + +func NewGoLevelDBStore(base string, syncLog int) (*GoLevelDBStore, error) { + cfg := config.NewConfigDefault() + cfg.DBName = "goleveldb" + cfg.DBPath = base + cfg.LevelDB.BlockSize = 16 * 1024 * 1024 + cfg.LevelDB.CacheSize = 64 * 1024 * 1024 + cfg.LevelDB.WriteBufferSize = 64 * 1024 * 1024 + cfg.LevelDB.Compression = false + cfg.DBSyncCommit = syncLog + + s := new(GoLevelDBStore) + s.cfg = cfg + + if err := s.open(); err != nil { + return nil, err + } + + return s, nil +} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/log.go b/vendor/github.com/siddontang/ledisdb/rpl/log.go new file mode 100644 index 000000000000..ad0b48cd4f79 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/rpl/log.go @@ -0,0 +1,167 @@ +package rpl + +import ( + "bytes" + "encoding/binary" + "io" + "sync" +) + +const LogHeadSize = 17 + +type Log struct { + ID uint64 + CreateTime uint32 + Compression uint8 + + Data []byte +} + +func (l *Log) HeadSize() int { + return LogHeadSize +} + +func (l *Log) Size() int { + return l.HeadSize() + len(l.Data) +} + +func (l *Log) Marshal() ([]byte, error) { + buf := bytes.NewBuffer(make([]byte, l.Size())) + buf.Reset() + + if err := l.Encode(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (l *Log) Unmarshal(b []byte) error { + buf := bytes.NewBuffer(b) + + return l.Decode(buf) +} + +var headPool = sync.Pool{ + New: func() interface{} { return make([]byte, LogHeadSize) }, +} + +func (l *Log) Encode(w io.Writer) error { + b := headPool.Get().([]byte) + pos := 0 + + binary.BigEndian.PutUint64(b[pos:], l.ID) + pos += 8 + binary.BigEndian.PutUint32(b[pos:], uint32(l.CreateTime)) + pos += 4 + b[pos] = l.Compression + pos++ + binary.BigEndian.PutUint32(b[pos:], uint32(len(l.Data))) + + n, err := w.Write(b) + headPool.Put(b) + + if err != nil { + return err + } else if n != LogHeadSize { + return io.ErrShortWrite + } + + if n, err = w.Write(l.Data); err != nil { + return err + } else if n != len(l.Data) { + return io.ErrShortWrite + } + return nil +} + +func (l *Log) Decode(r io.Reader) error { + length, err := l.DecodeHead(r) + if err != nil { + return err + } + + l.growData(int(length)) + + if _, err := io.ReadFull(r, l.Data); err != nil { + return err + } + + return nil +} + +func (l *Log) DecodeHead(r io.Reader) (uint32, error) { + buf := headPool.Get().([]byte) + + if _, err := io.ReadFull(r, buf); err != nil { + headPool.Put(buf) + return 0, err + } + + length := l.decodeHeadBuf(buf) + + headPool.Put(buf) + + return length, nil +} + +func (l *Log) DecodeAt(r io.ReaderAt, pos int64) error { + length, err := l.DecodeHeadAt(r, pos) + if err != nil { + return err + } + + l.growData(int(length)) + var n int + n, err = r.ReadAt(l.Data, pos+int64(LogHeadSize)) + if err == io.EOF && n == len(l.Data) { + err = nil + } + + return err +} + +func (l *Log) growData(length int) { + l.Data = l.Data[0:0] + + if cap(l.Data) >= length { + l.Data = l.Data[0:length] + } else { + l.Data = make([]byte, length) + } +} + +func (l *Log) DecodeHeadAt(r io.ReaderAt, pos int64) (uint32, error) { + buf := headPool.Get().([]byte) + + n, err 
:= r.ReadAt(buf, pos)
+	if err != nil && err != io.EOF {
+		headPool.Put(buf)
+
+		return 0, err
+	}
+
+	length := l.decodeHeadBuf(buf)
+	headPool.Put(buf)
+
+	if err == io.EOF && (length != 0 || n != len(buf)) {
+		return 0, err
+	}
+
+	return length, nil
+}
+
+func (l *Log) decodeHeadBuf(buf []byte) uint32 {
+	pos := 0
+	l.ID = binary.BigEndian.Uint64(buf[pos:])
+	pos += 8
+
+	l.CreateTime = binary.BigEndian.Uint32(buf[pos:])
+	pos += 4
+
+	l.Compression = uint8(buf[pos])
+	pos++
+
+	length := binary.BigEndian.Uint32(buf[pos:])
+	return length
+}
diff --git a/vendor/github.com/siddontang/ledisdb/rpl/rpl.go b/vendor/github.com/siddontang/ledisdb/rpl/rpl.go
new file mode 100644
index 000000000000..0ebf66de413c
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/rpl/rpl.go
@@ -0,0 +1,336 @@
+package rpl
+
+import (
+	"encoding/binary"
+	"os"
+	"path"
+	"sync"
+	"time"
+
+	"github.com/siddontang/go/log"
+	"github.com/siddontang/go/snappy"
+	"github.com/siddontang/ledisdb/config"
+)
+
+type Stat struct {
+	FirstID  uint64
+	LastID   uint64
+	CommitID uint64
+}
+
+type Replication struct {
+	m sync.Mutex
+
+	cfg *config.Config
+
+	s LogStore
+
+	commitID  uint64
+	commitLog *os.File
+
+	quit chan struct{}
+
+	wg sync.WaitGroup
+
+	nc chan struct{}
+
+	ncm sync.Mutex
+}
+
+func NewReplication(cfg *config.Config) (*Replication, error) {
+	if len(cfg.Replication.Path) == 0 {
+		cfg.Replication.Path = path.Join(cfg.DataDir, "rpl")
+	}
+
+	base := cfg.Replication.Path
+
+	r := new(Replication)
+
+	r.quit = make(chan struct{})
+	r.nc = make(chan struct{})
+
+	r.cfg = cfg
+
+	var err error
+
+	switch cfg.Replication.StoreName {
+	case "goleveldb":
+		if r.s, err = NewGoLevelDBStore(path.Join(base, "wal"), cfg.Replication.SyncLog); err != nil {
+			return nil, err
+		}
+	default:
+		if r.s, err = NewFileStore(path.Join(base, "ldb"), cfg); err != nil {
+			return nil, err
+		}
+	}
+
+	if r.commitLog, err = os.OpenFile(path.Join(base, "commit.log"), os.O_RDWR|os.O_CREATE, 0644); err != nil {
+		return nil, err
+	}
+
+	if s, _ := r.commitLog.Stat(); s.Size() == 0 {
+		r.commitID = 0
+	} else if err = binary.Read(r.commitLog, binary.BigEndian, &r.commitID); err != nil {
+		return nil, err
+	}
+
+	log.Infof("starting replication with commit ID %d", r.commitID)
+
+	r.wg.Add(1)
+	go r.run()
+
+	return r, nil
+}
+
+func (r *Replication) Close() error {
+	close(r.quit)
+
+	r.wg.Wait()
+
+	r.m.Lock()
+	defer r.m.Unlock()
+
+	log.Infof("closing replication with commit ID %d", r.commitID)
+
+	if r.s != nil {
+		r.s.Close()
+		r.s = nil
+	}
+
+	if err := r.updateCommitID(r.commitID, true); err != nil {
+		log.Errorf("update commit id err %s", err.Error())
+	}
+
+	if r.commitLog != nil {
+		r.commitLog.Close()
+		r.commitLog = nil
+	}
+
+	return nil
+}
+
+func (r *Replication) Log(data []byte) (*Log, error) {
+	if r.cfg.Replication.Compression {
+		// TODO: optimize
+		var err error
+		if data, err = snappy.Encode(nil, data); err != nil {
+			return nil, err
+		}
+	}
+
+	r.m.Lock()
+
+	lastID, err := r.s.LastID()
+	if err != nil {
+		r.m.Unlock()
+		return nil, err
+	}
+
+	commitId := r.commitID
+	if lastID < commitId {
+		lastID = commitId
+	} else if lastID > commitId {
+		r.m.Unlock()
+		return nil, ErrCommitIDBehind
+	}
+
+	l := new(Log)
+	l.ID = lastID + 1
+	l.CreateTime = uint32(time.Now().Unix())
+
+	if r.cfg.Replication.Compression {
+		l.Compression = 1
+	} else {
+		l.Compression = 0
+	}
+
+	l.Data = data
+
+	if err = r.s.StoreLog(l); err != nil {
+		r.m.Unlock()
+		return nil, err
+	}
+
+	r.m.Unlock()
+
+	r.ncm.Lock()
+	close(r.nc)
+	r.nc =
make(chan struct{}) + r.ncm.Unlock() + + return l, nil +} + +func (r *Replication) WaitLog() <-chan struct{} { + r.ncm.Lock() + ch := r.nc + r.ncm.Unlock() + return ch +} + +func (r *Replication) StoreLog(log *Log) error { + r.m.Lock() + err := r.s.StoreLog(log) + r.m.Unlock() + + return err +} + +func (r *Replication) FirstLogID() (uint64, error) { + r.m.Lock() + id, err := r.s.FirstID() + r.m.Unlock() + + return id, err +} + +func (r *Replication) LastLogID() (uint64, error) { + r.m.Lock() + id, err := r.s.LastID() + r.m.Unlock() + return id, err +} + +func (r *Replication) LastCommitID() (uint64, error) { + r.m.Lock() + id := r.commitID + r.m.Unlock() + return id, nil +} + +func (r *Replication) UpdateCommitID(id uint64) error { + r.m.Lock() + err := r.updateCommitID(id, r.cfg.Replication.SyncLog == 2) + r.m.Unlock() + + return err +} + +func (r *Replication) Stat() (*Stat, error) { + r.m.Lock() + defer r.m.Unlock() + + s := &Stat{} + var err error + + if s.FirstID, err = r.s.FirstID(); err != nil { + return nil, err + } + + if s.LastID, err = r.s.LastID(); err != nil { + return nil, err + } + + s.CommitID = r.commitID + return s, nil +} + +func (r *Replication) updateCommitID(id uint64, force bool) error { + if force { + if _, err := r.commitLog.Seek(0, os.SEEK_SET); err != nil { + return err + } + + if err := binary.Write(r.commitLog, binary.BigEndian, id); err != nil { + return err + } + } + + r.commitID = id + + return nil +} + +func (r *Replication) CommitIDBehind() (bool, error) { + r.m.Lock() + + id, err := r.s.LastID() + if err != nil { + r.m.Unlock() + return false, err + } + + behind := id > r.commitID + r.m.Unlock() + + return behind, nil +} + +func (r *Replication) GetLog(id uint64, log *Log) error { + return r.s.GetLog(id, log) +} + +func (r *Replication) NextNeedCommitLog(log *Log) error { + r.m.Lock() + defer r.m.Unlock() + + id, err := r.s.LastID() + if err != nil { + return err + } + + if id <= r.commitID { + return ErrNoBehindLog + } + + return r.s.GetLog(r.commitID+1, log) + +} + +func (r *Replication) Clear() error { + return r.ClearWithCommitID(0) +} + +func (r *Replication) ClearWithCommitID(id uint64) error { + r.m.Lock() + defer r.m.Unlock() + + if err := r.s.Clear(); err != nil { + return err + } + + return r.updateCommitID(id, true) +} + +func (r *Replication) run() { + defer r.wg.Done() + + syncTc := time.NewTicker(1 * time.Second) + purgeTc := time.NewTicker(1 * time.Hour) + + for { + select { + case <-purgeTc.C: + n := (r.cfg.Replication.ExpiredLogDays * 24 * 3600) + r.m.Lock() + err := r.s.PurgeExpired(int64(n)) + r.m.Unlock() + if err != nil { + log.Errorf("purge expired log error %s", err.Error()) + } + case <-syncTc.C: + if r.cfg.Replication.SyncLog == 1 { + r.m.Lock() + err := r.s.Sync() + r.m.Unlock() + if err != nil { + log.Errorf("sync store error %s", err.Error()) + } + } + if r.cfg.Replication.SyncLog != 2 { + //we will sync commit id every 1 second + r.m.Lock() + err := r.updateCommitID(r.commitID, true) + r.m.Unlock() + + if err != nil { + log.Errorf("sync commitid error %s", err.Error()) + } + } + case <-r.quit: + syncTc.Stop() + purgeTc.Stop() + return + } + } +} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/store.go b/vendor/github.com/siddontang/ledisdb/rpl/store.go new file mode 100644 index 000000000000..9f985ec6be0f --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/rpl/store.go @@ -0,0 +1,36 @@ +package rpl + +import ( + "errors" +) + +const ( + InvalidLogID uint64 = 0 +) + +var ( + ErrLogNotFound = errors.New("log not 
found") + ErrStoreLogID = errors.New("log id is less") + ErrNoBehindLog = errors.New("no behind commit log") + ErrCommitIDBehind = errors.New("commit id is behind last log id") +) + +type LogStore interface { + GetLog(id uint64, log *Log) error + + FirstID() (uint64, error) + LastID() (uint64, error) + + // if log id is less than current last id, return error + StoreLog(log *Log) error + + // Delete logs before n seconds + PurgeExpired(n int64) error + + Sync() error + + // Clear all logs + Clear() error + + Close() error +} diff --git a/vendor/github.com/siddontang/ledisdb/store/db.go b/vendor/github.com/siddontang/ledisdb/store/db.go new file mode 100644 index 000000000000..bfc0418298b8 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/db.go @@ -0,0 +1,169 @@ +package store + +import ( + "sync" + "time" + + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store/driver" +) + +type DB struct { + db driver.IDB + name string + + st *Stat + + cfg *config.Config + + lastCommit time.Time + + m sync.Mutex +} + +func (db *DB) Close() error { + return db.db.Close() +} + +func (db *DB) String() string { + return db.name +} + +func (db *DB) NewIterator() *Iterator { + db.st.IterNum.Add(1) + + it := new(Iterator) + it.it = db.db.NewIterator() + it.st = db.st + + return it +} + +func (db *DB) Get(key []byte) ([]byte, error) { + t := time.Now() + v, err := db.db.Get(key) + db.st.statGet(v, err) + db.st.GetTotalTime.Add(time.Now().Sub(t)) + return v, err +} + +func (db *DB) Put(key []byte, value []byte) error { + db.st.PutNum.Add(1) + + if db.needSyncCommit() { + return db.db.SyncPut(key, value) + + } else { + return db.db.Put(key, value) + + } +} + +func (db *DB) Delete(key []byte) error { + db.st.DeleteNum.Add(1) + + if db.needSyncCommit() { + return db.db.SyncDelete(key) + } else { + return db.db.Delete(key) + } +} + +func (db *DB) NewWriteBatch() *WriteBatch { + db.st.BatchNum.Add(1) + wb := new(WriteBatch) + wb.wb = db.db.NewWriteBatch() + wb.st = db.st + wb.db = db + return wb +} + +func (db *DB) NewSnapshot() (*Snapshot, error) { + db.st.SnapshotNum.Add(1) + + var err error + s := &Snapshot{} + if s.ISnapshot, err = db.db.NewSnapshot(); err != nil { + return nil, err + } + s.st = db.st + + return s, nil +} + +func (db *DB) Compact() error { + db.st.CompactNum.Add(1) + + t := time.Now() + err := db.db.Compact() + + db.st.CompactTotalTime.Add(time.Now().Sub(t)) + + return err +} + +func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { + return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) +} + +func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { + return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) +} + +//count < 0, unlimit. +// +//offset must >= 0, if < 0, will get nothing. +func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { + return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) +} + +//count < 0, unlimit. +// +//offset must >= 0, if < 0, will get nothing. 
+func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { + return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) +} + +func (db *DB) Stat() *Stat { + return db.st +} + +func (db *DB) needSyncCommit() bool { + if db.cfg.DBSyncCommit == 0 { + return false + } else if db.cfg.DBSyncCommit == 2 { + return true + } else { + n := time.Now() + need := false + db.m.Lock() + + if n.Sub(db.lastCommit) > time.Second { + need = true + } + db.lastCommit = n + + db.m.Unlock() + return need + } + +} + +func (db *DB) GetSlice(key []byte) (Slice, error) { + if d, ok := db.db.(driver.ISliceGeter); ok { + t := time.Now() + v, err := d.GetSlice(key) + db.st.statGet(v, err) + db.st.GetTotalTime.Add(time.Now().Sub(t)) + return v, err + } else { + v, err := db.Get(key) + if err != nil { + return nil, err + } else if v == nil { + return nil, nil + } else { + return driver.GoSlice(v), nil + } + } +} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/driver.go b/vendor/github.com/siddontang/ledisdb/store/driver/driver.go new file mode 100644 index 000000000000..afa549cd258d --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/driver/driver.go @@ -0,0 +1,57 @@ +package driver + +type IDB interface { + Close() error + + Get(key []byte) ([]byte, error) + + Put(key []byte, value []byte) error + Delete(key []byte) error + + SyncPut(key []byte, value []byte) error + SyncDelete(key []byte) error + + NewIterator() IIterator + + NewWriteBatch() IWriteBatch + + NewSnapshot() (ISnapshot, error) + + Compact() error +} + +type ISnapshot interface { + Get(key []byte) ([]byte, error) + NewIterator() IIterator + Close() +} + +type IIterator interface { + Close() error + + First() + Last() + Seek(key []byte) + + Next() + Prev() + + Valid() bool + + Key() []byte + Value() []byte +} + +type IWriteBatch interface { + Put(key []byte, value []byte) + Delete(key []byte) + Commit() error + SyncCommit() error + Rollback() error + Data() []byte + Close() +} + +type ISliceGeter interface { + GetSlice(key []byte) (ISlice, error) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/slice.go b/vendor/github.com/siddontang/ledisdb/store/driver/slice.go new file mode 100644 index 000000000000..d0c80e0b8fcf --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/driver/slice.go @@ -0,0 +1,21 @@ +package driver + +type ISlice interface { + Data() []byte + Size() int + Free() +} + +type GoSlice []byte + +func (s GoSlice) Data() []byte { + return []byte(s) +} + +func (s GoSlice) Size() int { + return len(s) +} + +func (s GoSlice) Free() { + +} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/store.go b/vendor/github.com/siddontang/ledisdb/store/driver/store.go new file mode 100644 index 000000000000..fbaebfc98b05 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/driver/store.go @@ -0,0 +1,46 @@ +package driver + +import ( + "fmt" + + "github.com/siddontang/ledisdb/config" +) + +type Store interface { + String() string + Open(path string, cfg *config.Config) (IDB, error) + Repair(path string, cfg *config.Config) error +} + +var dbs = map[string]Store{} + +func Register(s Store) { + name := s.String() + if _, ok := dbs[name]; ok { + panic(fmt.Errorf("store %s is registered", s)) + } + + dbs[name] = s +} + +func ListStores() []string { + s := []string{} + for k := range dbs { + s = append(s, k) + } + + return s +} + +func GetStore(cfg *config.Config) (Store, error) { + if 
len(cfg.DBName) == 0 { + cfg.DBName = config.DefaultDBName + } + + s, ok := dbs[cfg.DBName] + if !ok { + return nil, fmt.Errorf("store %s is not registered", cfg.DBName) + } + + return s, nil +} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go new file mode 100644 index 000000000000..2032279a2a8e --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go @@ -0,0 +1,39 @@ +package goleveldb + +import ( + "github.com/syndtr/goleveldb/leveldb" +) + +type WriteBatch struct { + db *DB + wbatch *leveldb.Batch +} + +func (w *WriteBatch) Put(key, value []byte) { + w.wbatch.Put(key, value) +} + +func (w *WriteBatch) Delete(key []byte) { + w.wbatch.Delete(key) +} + +func (w *WriteBatch) Commit() error { + return w.db.db.Write(w.wbatch, nil) +} + +func (w *WriteBatch) SyncCommit() error { + return w.db.db.Write(w.wbatch, w.db.syncOpts) +} + +func (w *WriteBatch) Rollback() error { + w.wbatch.Reset() + return nil +} + +func (w *WriteBatch) Close() { + w.wbatch.Reset() +} + +func (w *WriteBatch) Data() []byte { + return w.wbatch.Dump() +} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go new file mode 100644 index 000000000000..2fffa7c82bb4 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go @@ -0,0 +1,4 @@ +package goleveldb + +const DBName = "goleveldb" +const MemDBName = "memory" diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go new file mode 100644 index 000000000000..1afc32fb75b5 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go @@ -0,0 +1,204 @@ +package goleveldb + +import ( + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/cache" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store/driver" + + "os" +) + +const defaultFilterBits int = 10 + +type Store struct { +} + +func (s Store) String() string { + return DBName +} + +type MemStore struct { +} + +func (s MemStore) String() string { + return MemDBName +} + +type DB struct { + path string + + cfg *config.LevelDBConfig + + db *leveldb.DB + + opts *opt.Options + + iteratorOpts *opt.ReadOptions + + syncOpts *opt.WriteOptions + + cache cache.Cache + + filter filter.Filter +} + +func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + db := new(DB) + db.path = path + db.cfg = &cfg.LevelDB + + db.initOpts() + + var err error + db.db, err = leveldb.OpenFile(db.path, db.opts) + + if err != nil { + return nil, err + } + + return db, nil +} + +func (s Store) Repair(path string, cfg *config.Config) error { + db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB)) + if err != nil { + return err + } + + db.Close() + return nil +} + +func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) { + db := new(DB) + db.path = path + db.cfg = &cfg.LevelDB + + db.initOpts() + + var err error + db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts) + if err != nil { + return nil, err + } + + return db, nil +} + +func (s MemStore) Repair(path string, cfg *config.Config) 
error { + return nil +} + +func (db *DB) initOpts() { + db.opts = newOptions(db.cfg) + + db.iteratorOpts = &opt.ReadOptions{} + db.iteratorOpts.DontFillCache = true + + db.syncOpts = &opt.WriteOptions{} + db.syncOpts.Sync = true +} + +func newOptions(cfg *config.LevelDBConfig) *opt.Options { + opts := &opt.Options{} + opts.ErrorIfMissing = false + + opts.BlockCacheCapacity = cfg.CacheSize + + //we must use bloomfilter + opts.Filter = filter.NewBloomFilter(defaultFilterBits) + + if !cfg.Compression { + opts.Compression = opt.NoCompression + } else { + opts.Compression = opt.SnappyCompression + } + + opts.BlockSize = cfg.BlockSize + opts.WriteBuffer = cfg.WriteBufferSize + opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles + + //here we use default value, later add config support + opts.CompactionTableSize = 32 * 1024 * 1024 + opts.WriteL0SlowdownTrigger = 16 + opts.WriteL0PauseTrigger = 64 + + return opts +} + +func (db *DB) Close() error { + return db.db.Close() +} + +func (db *DB) Put(key, value []byte) error { + return db.db.Put(key, value, nil) +} + +func (db *DB) Get(key []byte) ([]byte, error) { + v, err := db.db.Get(key, nil) + if err == leveldb.ErrNotFound { + return nil, nil + } + return v, nil +} + +func (db *DB) Delete(key []byte) error { + return db.db.Delete(key, nil) +} + +func (db *DB) SyncPut(key []byte, value []byte) error { + return db.db.Put(key, value, db.syncOpts) +} + +func (db *DB) SyncDelete(key []byte) error { + return db.db.Delete(key, db.syncOpts) +} + +func (db *DB) NewWriteBatch() driver.IWriteBatch { + wb := &WriteBatch{ + db: db, + wbatch: new(leveldb.Batch), + } + return wb +} + +func (db *DB) NewIterator() driver.IIterator { + it := &Iterator{ + db.db.NewIterator(nil, db.iteratorOpts), + } + + return it +} + +func (db *DB) NewSnapshot() (driver.ISnapshot, error) { + snapshot, err := db.db.GetSnapshot() + if err != nil { + return nil, err + } + + s := &Snapshot{ + db: db, + snp: snapshot, + } + + return s, nil +} + +func (db *DB) Compact() error { + return db.db.CompactRange(util.Range{nil, nil}) +} + +func init() { + driver.Register(Store{}) + driver.Register(MemStore{}) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go new file mode 100644 index 000000000000..c1fd8b5573bb --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go @@ -0,0 +1,49 @@ +package goleveldb + +import ( + "github.com/syndtr/goleveldb/leveldb/iterator" +) + +type Iterator struct { + it iterator.Iterator +} + +func (it *Iterator) Key() []byte { + return it.it.Key() +} + +func (it *Iterator) Value() []byte { + return it.it.Value() +} + +func (it *Iterator) Close() error { + if it.it != nil { + it.it.Release() + it.it = nil + } + return nil +} + +func (it *Iterator) Valid() bool { + return it.it.Valid() +} + +func (it *Iterator) Next() { + it.it.Next() +} + +func (it *Iterator) Prev() { + it.it.Prev() +} + +func (it *Iterator) First() { + it.it.First() +} + +func (it *Iterator) Last() { + it.it.Last() +} + +func (it *Iterator) Seek(key []byte) { + it.it.Seek(key) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go new file mode 100644 index 000000000000..c615579bb89d --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go @@ -0,0 +1,26 @@ +package goleveldb + +import ( + "github.com/siddontang/ledisdb/store/driver" + 
"github.com/syndtr/goleveldb/leveldb" +) + +type Snapshot struct { + db *DB + snp *leveldb.Snapshot +} + +func (s *Snapshot) Get(key []byte) ([]byte, error) { + return s.snp.Get(key, s.db.iteratorOpts) +} + +func (s *Snapshot) NewIterator() driver.IIterator { + it := &Iterator{ + s.snp.NewIterator(nil, s.db.iteratorOpts), + } + return it +} + +func (s *Snapshot) Close() { + s.snp.Release() +} diff --git a/vendor/github.com/siddontang/ledisdb/store/iterator.go b/vendor/github.com/siddontang/ledisdb/store/iterator.go new file mode 100644 index 000000000000..12a03b6cd402 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/iterator.go @@ -0,0 +1,334 @@ +package store + +import ( + "bytes" + + "github.com/siddontang/ledisdb/store/driver" +) + +const ( + IteratorForward uint8 = 0 + IteratorBackward uint8 = 1 +) + +const ( + RangeClose uint8 = 0x00 + RangeLOpen uint8 = 0x01 + RangeROpen uint8 = 0x10 + RangeOpen uint8 = 0x11 +) + +// min must less or equal than max +// +// range type: +// +// close: [min, max] +// open: (min, max) +// lopen: (min, max] +// ropen: [min, max) +// +type Range struct { + Min []byte + Max []byte + + Type uint8 +} + +type Limit struct { + Offset int + Count int +} + +type Iterator struct { + it driver.IIterator + st *Stat +} + +// Returns a copy of key. +func (it *Iterator) Key() []byte { + k := it.it.Key() + if k == nil { + return nil + } + + return append([]byte{}, k...) +} + +// Returns a copy of value. +func (it *Iterator) Value() []byte { + v := it.it.Value() + if v == nil { + return nil + } + + return append([]byte{}, v...) +} + +// Returns a reference of key. +// you must be careful that it will be changed after next iterate. +func (it *Iterator) RawKey() []byte { + return it.it.Key() +} + +// Returns a reference of value. +// you must be careful that it will be changed after next iterate. +func (it *Iterator) RawValue() []byte { + return it.it.Value() +} + +// Copy key to b, if b len is small or nil, returns a new one. +func (it *Iterator) BufKey(b []byte) []byte { + k := it.RawKey() + if k == nil { + return nil + } + if b == nil { + b = []byte{} + } + + b = b[0:0] + return append(b, k...) +} + +// Copy value to b, if b len is small or nil, returns a new one. +func (it *Iterator) BufValue(b []byte) []byte { + v := it.RawValue() + if v == nil { + return nil + } + + if b == nil { + b = []byte{} + } + + b = b[0:0] + return append(b, v...) +} + +func (it *Iterator) Close() { + if it.it != nil { + it.st.IterCloseNum.Add(1) + it.it.Close() + it.it = nil + } +} + +func (it *Iterator) Valid() bool { + return it.it.Valid() +} + +func (it *Iterator) Next() { + it.st.IterSeekNum.Add(1) + it.it.Next() +} + +func (it *Iterator) Prev() { + it.st.IterSeekNum.Add(1) + it.it.Prev() +} + +func (it *Iterator) SeekToFirst() { + it.st.IterSeekNum.Add(1) + it.it.First() +} + +func (it *Iterator) SeekToLast() { + it.st.IterSeekNum.Add(1) + it.it.Last() +} + +func (it *Iterator) Seek(key []byte) { + it.st.IterSeekNum.Add(1) + it.it.Seek(key) +} + +// Finds by key, if not found, nil returns. +func (it *Iterator) Find(key []byte) []byte { + it.Seek(key) + if it.Valid() { + k := it.RawKey() + if k == nil { + return nil + } else if bytes.Equal(k, key) { + return it.Value() + } + } + + return nil +} + +// Finds by key, if not found, nil returns, else a reference of value returns. +// you must be careful that it will be changed after next iterate. 
+func (it *Iterator) RawFind(key []byte) []byte { + it.Seek(key) + if it.Valid() { + k := it.RawKey() + if k == nil { + return nil + } else if bytes.Equal(k, key) { + return it.RawValue() + } + } + + return nil +} + +type RangeLimitIterator struct { + it *Iterator + + r *Range + l *Limit + + step int + + //0 for IteratorForward, 1 for IteratorBackward + direction uint8 +} + +func (it *RangeLimitIterator) Key() []byte { + return it.it.Key() +} + +func (it *RangeLimitIterator) Value() []byte { + return it.it.Value() +} + +func (it *RangeLimitIterator) RawKey() []byte { + return it.it.RawKey() +} + +func (it *RangeLimitIterator) RawValue() []byte { + return it.it.RawValue() +} + +func (it *RangeLimitIterator) BufKey(b []byte) []byte { + return it.it.BufKey(b) +} + +func (it *RangeLimitIterator) BufValue(b []byte) []byte { + return it.it.BufValue(b) +} + +func (it *RangeLimitIterator) Valid() bool { + if it.l.Offset < 0 { + return false + } else if !it.it.Valid() { + return false + } else if it.l.Count >= 0 && it.step >= it.l.Count { + return false + } + + if it.direction == IteratorForward { + if it.r.Max != nil { + r := bytes.Compare(it.it.RawKey(), it.r.Max) + if it.r.Type&RangeROpen > 0 { + return !(r >= 0) + } else { + return !(r > 0) + } + } + } else { + if it.r.Min != nil { + r := bytes.Compare(it.it.RawKey(), it.r.Min) + if it.r.Type&RangeLOpen > 0 { + return !(r <= 0) + } else { + return !(r < 0) + } + } + } + + return true +} + +func (it *RangeLimitIterator) Next() { + it.step++ + + if it.direction == IteratorForward { + it.it.Next() + } else { + it.it.Prev() + } +} + +func (it *RangeLimitIterator) Close() { + it.it.Close() +} + +func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { + return rangeLimitIterator(i, r, l, IteratorForward) +} + +func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { + return rangeLimitIterator(i, r, l, IteratorBackward) +} + +func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { + return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward) +} + +func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { + return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward) +} + +func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator { + it := new(RangeLimitIterator) + + it.it = i + + it.r = r + it.l = l + it.direction = direction + + it.step = 0 + + if l.Offset < 0 { + return it + } + + if direction == IteratorForward { + if r.Min == nil { + it.it.SeekToFirst() + } else { + it.it.Seek(r.Min) + + if r.Type&RangeLOpen > 0 { + if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) { + it.it.Next() + } + } + } + } else { + if r.Max == nil { + it.it.SeekToLast() + } else { + it.it.Seek(r.Max) + + if !it.it.Valid() { + it.it.SeekToLast() + } else { + if !bytes.Equal(it.it.RawKey(), r.Max) { + it.it.Prev() + } + } + + if r.Type&RangeROpen > 0 { + if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) { + it.it.Prev() + } + } + } + } + + for i := 0; i < l.Offset; i++ { + if it.it.Valid() { + if it.direction == IteratorForward { + it.it.Next() + } else { + it.it.Prev() + } + } + } + + return it +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go new file mode 100644 index 000000000000..cc1b02448896 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go @@ -0,0 +1,99 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb 
+// #include "leveldb/c.h" +// #include "leveldb_ext.h" +import "C" + +import ( + "unsafe" + + "github.com/syndtr/goleveldb/leveldb" +) + +type WriteBatch struct { + db *DB + wbatch *C.leveldb_writebatch_t +} + +func newWriteBatch(db *DB) *WriteBatch { + w := new(WriteBatch) + w.db = db + w.wbatch = C.leveldb_writebatch_create() + + return w +} + +func (w *WriteBatch) Close() { + if w.wbatch != nil { + C.leveldb_writebatch_destroy(w.wbatch) + w.wbatch = nil + } +} + +func (w *WriteBatch) Put(key, value []byte) { + var k, v *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + if len(value) != 0 { + v = (*C.char)(unsafe.Pointer(&value[0])) + } + + lenk := len(key) + lenv := len(value) + + C.leveldb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv)) +} + +func (w *WriteBatch) Delete(key []byte) { + C.leveldb_writebatch_delete(w.wbatch, + (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) +} + +func (w *WriteBatch) Commit() error { + return w.commit(w.db.writeOpts) +} + +func (w *WriteBatch) SyncCommit() error { + return w.commit(w.db.syncOpts) +} + +func (w *WriteBatch) Rollback() error { + C.leveldb_writebatch_clear(w.wbatch) + + return nil +} + +func (w *WriteBatch) commit(wb *WriteOptions) error { + var errStr *C.char + C.leveldb_write(w.db.db, wb.Opt, w.wbatch, &errStr) + if errStr != nil { + return saveError(errStr) + } + return nil +} + +//export leveldb_writebatch_iterate_put +func leveldb_writebatch_iterate_put(p unsafe.Pointer, k *C.char, klen C.size_t, v *C.char, vlen C.size_t) { + b := (*leveldb.Batch)(p) + key := slice(unsafe.Pointer(k), int(klen)) + value := slice(unsafe.Pointer(v), int(vlen)) + b.Put(key, value) +} + +//export leveldb_writebatch_iterate_delete +func leveldb_writebatch_iterate_delete(p unsafe.Pointer, k *C.char, klen C.size_t) { + b := (*leveldb.Batch)(p) + key := slice(unsafe.Pointer(k), int(klen)) + b.Delete(key) +} + +func (w *WriteBatch) Data() []byte { + gbatch := leveldb.Batch{} + C.leveldb_writebatch_iterate_ext(w.wbatch, + unsafe.Pointer(&gbatch)) + return gbatch.Dump() +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go new file mode 100644 index 000000000000..e5587cbf89e2 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go @@ -0,0 +1,20 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include +// #include "leveldb/c.h" +import "C" + +type Cache struct { + Cache *C.leveldb_cache_t +} + +func NewLRUCache(capacity int) *Cache { + return &Cache{C.leveldb_cache_create_lru(C.size_t(capacity))} +} + +func (c *Cache) Close() { + C.leveldb_cache_destroy(c.Cache) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go new file mode 100644 index 000000000000..df5b3c7a83b5 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go @@ -0,0 +1,3 @@ +package leveldb + +const DBName = "leveldb" diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go new file mode 100644 index 000000000000..7f1ee676ec39 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go @@ -0,0 +1,314 @@ +// +build leveldb + +// Package leveldb is a wrapper for c++ leveldb +package leveldb + +/* +#cgo LDFLAGS: -lleveldb +#include +#include "leveldb_ext.h" +*/ +import "C" + +import ( + "os" + "runtime" + "unsafe" + + 
"github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store/driver" +) + +const defaultFilterBits int = 10 + +type Store struct { +} + +func (s Store) String() string { + return DBName +} + +func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + db := new(DB) + db.path = path + db.cfg = &cfg.LevelDB + + if err := db.open(); err != nil { + return nil, err + } + + return db, nil +} + +func (s Store) Repair(path string, cfg *config.Config) error { + db := new(DB) + db.cfg = &cfg.LevelDB + db.path = path + + err := db.open() + defer db.Close() + + //open ok, do not need repair + if err == nil { + return nil + } + + var errStr *C.char + ldbname := C.CString(path) + defer C.leveldb_free(unsafe.Pointer(ldbname)) + + C.leveldb_repair_db(db.opts.Opt, ldbname, &errStr) + if errStr != nil { + return saveError(errStr) + } + return nil +} + +type DB struct { + path string + + cfg *config.LevelDBConfig + + db *C.leveldb_t + + opts *Options + + //for default read and write options + readOpts *ReadOptions + writeOpts *WriteOptions + iteratorOpts *ReadOptions + + syncOpts *WriteOptions + + cache *Cache + + filter *FilterPolicy +} + +func (db *DB) open() error { + db.initOptions(db.cfg) + + var errStr *C.char + ldbname := C.CString(db.path) + defer C.leveldb_free(unsafe.Pointer(ldbname)) + + db.db = C.leveldb_open(db.opts.Opt, ldbname, &errStr) + if errStr != nil { + db.db = nil + return saveError(errStr) + } + return nil +} + +func (db *DB) initOptions(cfg *config.LevelDBConfig) { + opts := NewOptions() + + opts.SetCreateIfMissing(true) + + db.cache = NewLRUCache(cfg.CacheSize) + opts.SetCache(db.cache) + + //we must use bloomfilter + db.filter = NewBloomFilter(defaultFilterBits) + opts.SetFilterPolicy(db.filter) + + if !cfg.Compression { + opts.SetCompression(NoCompression) + } else { + opts.SetCompression(SnappyCompression) + } + + opts.SetBlockSize(cfg.BlockSize) + + opts.SetWriteBufferSize(cfg.WriteBufferSize) + + opts.SetMaxOpenFiles(cfg.MaxOpenFiles) + + opts.SetMaxFileSize(cfg.MaxFileSize) + + db.opts = opts + + db.readOpts = NewReadOptions() + db.writeOpts = NewWriteOptions() + + db.syncOpts = NewWriteOptions() + db.syncOpts.SetSync(true) + + db.iteratorOpts = NewReadOptions() + db.iteratorOpts.SetFillCache(false) +} + +func (db *DB) Close() error { + if db.db != nil { + C.leveldb_close(db.db) + db.db = nil + } + + db.opts.Close() + + if db.cache != nil { + db.cache.Close() + } + + if db.filter != nil { + db.filter.Close() + } + + db.readOpts.Close() + db.writeOpts.Close() + db.iteratorOpts.Close() + + return nil +} + +func (db *DB) Put(key, value []byte) error { + return db.put(db.writeOpts, key, value) +} + +func (db *DB) Get(key []byte) ([]byte, error) { + return db.get(db.readOpts, key) +} + +func (db *DB) Delete(key []byte) error { + return db.delete(db.writeOpts, key) +} + +func (db *DB) SyncPut(key []byte, value []byte) error { + return db.put(db.syncOpts, key, value) +} + +func (db *DB) SyncDelete(key []byte) error { + return db.delete(db.syncOpts, key) +} + +func (db *DB) NewWriteBatch() driver.IWriteBatch { + wb := newWriteBatch(db) + + runtime.SetFinalizer(wb, func(w *WriteBatch) { + w.Close() + }) + + return wb +} + +func (db *DB) NewIterator() driver.IIterator { + it := new(Iterator) + + it.it = C.leveldb_create_iterator(db.db, db.iteratorOpts.Opt) + + return it +} + +func (db *DB) NewSnapshot() (driver.ISnapshot, error) { + snap := &Snapshot{ + db: db, + snap: 
C.leveldb_create_snapshot(db.db), + readOpts: NewReadOptions(), + iteratorOpts: NewReadOptions(), + } + snap.readOpts.SetSnapshot(snap) + snap.iteratorOpts.SetSnapshot(snap) + snap.iteratorOpts.SetFillCache(false) + + return snap, nil +} + +func (db *DB) put(wo *WriteOptions, key, value []byte) error { + var errStr *C.char + var k, v *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + if len(value) != 0 { + v = (*C.char)(unsafe.Pointer(&value[0])) + } + + lenk := len(key) + lenv := len(value) + C.leveldb_put( + db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr) + + if errStr != nil { + return saveError(errStr) + } + return nil +} + +func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) { + var errStr *C.char + var vallen C.size_t + var k *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + value := C.leveldb_get( + db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) + + if errStr != nil { + return nil, saveError(errStr) + } + + if value == nil { + return nil, nil + } + + defer C.leveldb_free(unsafe.Pointer(value)) + + return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil +} + +func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) { + var errStr *C.char + var vallen C.size_t + var k *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + value := C.leveldb_get( + db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) + + if errStr != nil { + return nil, saveError(errStr) + } + + if value == nil { + return nil, nil + } + + return NewCSlice(unsafe.Pointer(value), int(vallen)), nil +} + +func (db *DB) delete(wo *WriteOptions, key []byte) error { + var errStr *C.char + var k *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + C.leveldb_delete( + db.db, wo.Opt, k, C.size_t(len(key)), &errStr) + + if errStr != nil { + return saveError(errStr) + } + return nil +} + +func (db *DB) Compact() error { + C.leveldb_compact_range(db.db, nil, 0, nil, 0) + return nil +} + +func (db *DB) GetSlice(key []byte) (driver.ISlice, error) { + return db.getSlice(db.readOpts, key) +} + +func init() { + driver.Register(Store{}) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go new file mode 100644 index 000000000000..640139fb8b8e --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go @@ -0,0 +1,21 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include +// #include "leveldb/c.h" +import "C" + +type FilterPolicy struct { + Policy *C.leveldb_filterpolicy_t +} + +func NewBloomFilter(bitsPerKey int) *FilterPolicy { + policy := C.leveldb_filterpolicy_create_bloom(C.int(bitsPerKey)) + return &FilterPolicy{policy} +} + +func (fp *FilterPolicy) Close() { + C.leveldb_filterpolicy_destroy(fp.Policy) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go new file mode 100644 index 000000000000..49cfd7db18ea --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go @@ -0,0 +1,70 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include +// #include "leveldb/c.h" +// #include "leveldb_ext.h" +import "C" + +import ( + "unsafe" +) + +type Iterator struct { + it *C.leveldb_iterator_t + isValid C.uchar +} + +func (it *Iterator) Key() []byte { + var klen C.size_t + kdata := 
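+	// leveldb_iter_key returns a pointer into the iterator's own buffer;
+	// slice() below wraps it without copying, so the result is only valid
+	// until the iterator moves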
C.leveldb_iter_key(it.it, &klen) + if kdata == nil { + return nil + } + + return slice(unsafe.Pointer(kdata), int(C.int(klen))) +} + +func (it *Iterator) Value() []byte { + var vlen C.size_t + vdata := C.leveldb_iter_value(it.it, &vlen) + if vdata == nil { + return nil + } + + return slice(unsafe.Pointer(vdata), int(C.int(vlen))) +} + +func (it *Iterator) Close() error { + if it.it != nil { + C.leveldb_iter_destroy(it.it) + it.it = nil + } + return nil +} + +func (it *Iterator) Valid() bool { + return ucharToBool(it.isValid) +} + +func (it *Iterator) Next() { + it.isValid = C.leveldb_iter_next_ext(it.it) +} + +func (it *Iterator) Prev() { + it.isValid = C.leveldb_iter_prev_ext(it.it) +} + +func (it *Iterator) First() { + it.isValid = C.leveldb_iter_seek_to_first_ext(it.it) +} + +func (it *Iterator) Last() { + it.isValid = C.leveldb_iter_seek_to_last_ext(it.it) +} + +func (it *Iterator) Seek(key []byte) { + it.isValid = C.leveldb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc new file mode 100644 index 000000000000..540b7397b01b --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc @@ -0,0 +1,95 @@ +// +build leveldb + +#include "leveldb_ext.h" + +#include +//#include + +//#include "leveldb/db.h" + +//using namespace leveldb; + +extern "C" { + +// static bool SaveError(char** errptr, const Status& s) { +// assert(errptr != NULL); +// if (s.ok()) { +// return false; +// } else if (*errptr == NULL) { +// *errptr = strdup(s.ToString().c_str()); +// } else { +// free(*errptr); +// *errptr = strdup(s.ToString().c_str()); +// } +// return true; +// } + +// void* leveldb_get_ext( +// leveldb_t* db, +// const leveldb_readoptions_t* options, +// const char* key, size_t keylen, +// char** valptr, +// size_t* vallen, +// char** errptr) { + +// std::string *tmp = new(std::string); + +// //very tricky, maybe changed with c++ leveldb upgrade +// Status s = (*(DB**)db)->Get(*(ReadOptions*)options, Slice(key, keylen), tmp); + +// if (s.ok()) { +// *valptr = (char*)tmp->data(); +// *vallen = tmp->size(); +// } else { +// delete(tmp); +// tmp = NULL; +// *valptr = NULL; +// *vallen = 0; +// if (!s.IsNotFound()) { +// SaveError(errptr, s); +// } +// } +// return tmp; +// } + +// void leveldb_get_free_ext(void* context) { +// std::string* s = (std::string*)context; + +// delete(s); +// } + + +unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t* iter) { + leveldb_iter_seek_to_first(iter); + return leveldb_iter_valid(iter); +} + +unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t* iter) { + leveldb_iter_seek_to_last(iter); + return leveldb_iter_valid(iter); +} + +unsigned char leveldb_iter_seek_ext(leveldb_iterator_t* iter, const char* k, size_t klen) { + leveldb_iter_seek(iter, k, klen); + return leveldb_iter_valid(iter); +} + +unsigned char leveldb_iter_next_ext(leveldb_iterator_t* iter) { + leveldb_iter_next(iter); + return leveldb_iter_valid(iter); +} + +unsigned char leveldb_iter_prev_ext(leveldb_iterator_t* iter) { + leveldb_iter_prev(iter); + return leveldb_iter_valid(iter); +} + +extern void leveldb_writebatch_iterate_put(void*, const char* k, size_t klen, const char* v, size_t vlen); +extern void leveldb_writebatch_iterate_delete(void*, const char* k, size_t klen); + +void leveldb_writebatch_iterate_ext(leveldb_writebatch_t* w, void *p) { + leveldb_writebatch_iterate(w, 
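+        // replay every op in the batch through the Go callbacks exported
+        // from batch.go, so Go can rebuild it as a goleveldb Batch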
p, + leveldb_writebatch_iterate_put, leveldb_writebatch_iterate_delete); +} + +} \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h new file mode 100644 index 000000000000..3eed41bdf9b0 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h @@ -0,0 +1,41 @@ +// +build leveldb + +#ifndef LEVELDB_EXT_H +#define LEVELDB_EXT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "leveldb/c.h" + + +// /* Returns NULL if not found. Otherwise stores the value in **valptr. +// Stores the length of the value in *vallen. +// Returns a context must be later to free*/ +// extern void* leveldb_get_ext( +// leveldb_t* db, +// const leveldb_readoptions_t* options, +// const char* key, size_t keylen, +// char** valptr, +// size_t* vallen, +// char** errptr); + +// // Free context returns by leveldb_get_ext +// extern void leveldb_get_free_ext(void* context); + + +// Below iterator functions like leveldb iterator but returns valid status for iterator +extern unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t*); +extern unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t*); +extern unsigned char leveldb_iter_seek_ext(leveldb_iterator_t*, const char* k, size_t klen); +extern unsigned char leveldb_iter_next_ext(leveldb_iterator_t*); +extern unsigned char leveldb_iter_prev_ext(leveldb_iterator_t*); + +extern void leveldb_writebatch_iterate_ext(leveldb_writebatch_t*, void* p); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go new file mode 100644 index 000000000000..68733bb612be --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go @@ -0,0 +1,126 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include "leveldb/c.h" +import "C" + +type CompressionOpt int + +const ( + NoCompression = CompressionOpt(0) + SnappyCompression = CompressionOpt(1) +) + +type Options struct { + Opt *C.leveldb_options_t +} + +type ReadOptions struct { + Opt *C.leveldb_readoptions_t +} + +type WriteOptions struct { + Opt *C.leveldb_writeoptions_t +} + +func NewOptions() *Options { + opt := C.leveldb_options_create() + return &Options{opt} +} + +func NewReadOptions() *ReadOptions { + opt := C.leveldb_readoptions_create() + return &ReadOptions{opt} +} + +func NewWriteOptions() *WriteOptions { + opt := C.leveldb_writeoptions_create() + return &WriteOptions{opt} +} + +func (o *Options) Close() { + C.leveldb_options_destroy(o.Opt) +} + +func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) { + C.leveldb_options_set_comparator(o.Opt, cmp) +} + +func (o *Options) SetErrorIfExists(error_if_exists bool) { + eie := boolToUchar(error_if_exists) + C.leveldb_options_set_error_if_exists(o.Opt, eie) +} + +func (o *Options) SetCache(cache *Cache) { + C.leveldb_options_set_cache(o.Opt, cache.Cache) +} + +func (o *Options) SetWriteBufferSize(s int) { + C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s)) +} + +func (o *Options) SetParanoidChecks(pc bool) { + C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc)) +} + +func (o *Options) SetMaxOpenFiles(n int) { + C.leveldb_options_set_max_open_files(o.Opt, C.int(n)) +} + +func (o *Options) SetMaxFileSize(n int) { + C.leveldb_options_set_max_file_size(o.Opt, C.size_t(n)) +} + +func (o *Options) 
SetBlockSize(s int) { + C.leveldb_options_set_block_size(o.Opt, C.size_t(s)) +} + +func (o *Options) SetBlockRestartInterval(n int) { + C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n)) +} + +func (o *Options) SetCompression(t CompressionOpt) { + C.leveldb_options_set_compression(o.Opt, C.int(t)) +} + +func (o *Options) SetCreateIfMissing(b bool) { + C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b)) +} + +func (o *Options) SetFilterPolicy(fp *FilterPolicy) { + var policy *C.leveldb_filterpolicy_t + if fp != nil { + policy = fp.Policy + } + C.leveldb_options_set_filter_policy(o.Opt, policy) +} + +func (ro *ReadOptions) Close() { + C.leveldb_readoptions_destroy(ro.Opt) +} + +func (ro *ReadOptions) SetVerifyChecksums(b bool) { + C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b)) +} + +func (ro *ReadOptions) SetFillCache(b bool) { + C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b)) +} + +func (ro *ReadOptions) SetSnapshot(snap *Snapshot) { + var s *C.leveldb_snapshot_t + if snap != nil { + s = snap.snap + } + C.leveldb_readoptions_set_snapshot(ro.Opt, s) +} + +func (wo *WriteOptions) Close() { + C.leveldb_writeoptions_destroy(wo.Opt) +} + +func (wo *WriteOptions) SetSync(b bool) { + C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b)) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go new file mode 100644 index 000000000000..83ebf55c0369 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go @@ -0,0 +1,40 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include "leveldb/c.h" +import "C" + +import ( + "reflect" + "unsafe" +) + +type CSlice struct { + data unsafe.Pointer + size int +} + +func NewCSlice(p unsafe.Pointer, n int) *CSlice { + return &CSlice{p, n} +} + +func (s *CSlice) Data() []byte { + var value []byte + + sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) + sH.Cap = int(s.size) + sH.Len = int(s.size) + sH.Data = uintptr(s.data) + + return value +} + +func (s *CSlice) Size() int { + return int(s.size) +} + +func (s *CSlice) Free() { + C.leveldb_free(s.data) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go new file mode 100644 index 000000000000..bdc8d514da8f --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go @@ -0,0 +1,39 @@ +// +build leveldb + +package leveldb + +// #cgo LDFLAGS: -lleveldb +// #include "leveldb/c.h" +import "C" + +import ( + "github.com/siddontang/ledisdb/store/driver" +) + +type Snapshot struct { + db *DB + snap *C.leveldb_snapshot_t + readOpts *ReadOptions + iteratorOpts *ReadOptions +} + +func (s *Snapshot) Get(key []byte) ([]byte, error) { + return s.db.get(s.readOpts, key) +} + +func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) { + return s.db.getSlice(s.readOpts, key) +} + +func (s *Snapshot) NewIterator() driver.IIterator { + it := new(Iterator) + it.it = C.leveldb_create_iterator(s.db.db, s.db.iteratorOpts.Opt) + return it + +} + +func (s *Snapshot) Close() { + C.leveldb_release_snapshot(s.db.db, s.snap) + s.iteratorOpts.Close() + s.readOpts.Close() +} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go new file mode 100644 index 000000000000..6efe33b661d6 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go @@ -0,0 
+1,45 @@ +// +build leveldb + +package leveldb + +// #include "leveldb/c.h" +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +func boolToUchar(b bool) C.uchar { + uc := C.uchar(0) + if b { + uc = C.uchar(1) + } + return uc +} + +func ucharToBool(uc C.uchar) bool { + if uc == C.uchar(0) { + return false + } + return true +} + +func saveError(errStr *C.char) error { + if errStr != nil { + gs := C.GoString(errStr) + C.leveldb_free(unsafe.Pointer(errStr)) + return fmt.Errorf(gs) + } + return nil +} + +func slice(p unsafe.Pointer, n int) []byte { + var b []byte + pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + pbyte.Data = uintptr(p) + pbyte.Len = n + pbyte.Cap = n + return b +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go new file mode 100644 index 000000000000..bb727e7015ef --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go @@ -0,0 +1,83 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include "rocksdb/c.h" +// #include "rocksdb_ext.h" +import "C" + +import ( + "unsafe" +) + +type WriteBatch struct { + db *DB + wbatch *C.rocksdb_writebatch_t + commitOk bool +} + +func (w *WriteBatch) Close() { + if w.wbatch != nil { + C.rocksdb_writebatch_destroy(w.wbatch) + w.wbatch = nil + } +} + +func (w *WriteBatch) Put(key, value []byte) { + w.commitOk = false + + var k, v *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + if len(value) != 0 { + v = (*C.char)(unsafe.Pointer(&value[0])) + } + + lenk := len(key) + lenv := len(value) + + C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv)) +} + +func (w *WriteBatch) Delete(key []byte) { + w.commitOk = false + + C.rocksdb_writebatch_delete(w.wbatch, + (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) +} + +func (w *WriteBatch) Commit() error { + return w.commit(w.db.writeOpts) +} + +func (w *WriteBatch) SyncCommit() error { + return w.commit(w.db.syncOpts) +} + +func (w *WriteBatch) Rollback() error { + if !w.commitOk { + C.rocksdb_writebatch_clear(w.wbatch) + } + return nil +} + +func (w *WriteBatch) commit(wb *WriteOptions) error { + w.commitOk = true + + var errStr *C.char + C.rocksdb_write_ext(w.db.db, wb.Opt, w.wbatch, &errStr) + if errStr != nil { + w.commitOk = false + return saveError(errStr) + } + return nil +} + +func (w *WriteBatch) Data() []byte { + var vallen C.size_t + value := C.rocksdb_writebatch_data(w.wbatch, &vallen) + + return slice(unsafe.Pointer(value), int(vallen)) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go new file mode 100644 index 000000000000..931998ba4af4 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go @@ -0,0 +1,20 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include +// #include "rocksdb/c.h" +import "C" + +type Cache struct { + Cache *C.rocksdb_cache_t +} + +func NewLRUCache(capacity int) *Cache { + return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))} +} + +func (c *Cache) Close() { + C.rocksdb_cache_destroy(c.Cache) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go new file mode 100644 index 000000000000..f4155bbe2018 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go @@ -0,0 +1,3 @@ +package rocksdb + +const DBName = "rocksdb" diff --git 
a/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go new file mode 100644 index 000000000000..d5b708043ec6 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go @@ -0,0 +1,342 @@ +// +build rocksdb + +// Package rocksdb is a wrapper for c++ rocksdb +package rocksdb + +/* +#cgo LDFLAGS: -lrocksdb +#include +#include +#include "rocksdb_ext.h" +*/ +import "C" + +import ( + "os" + "runtime" + "unsafe" + + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store/driver" +) + +const defaultFilterBits int = 10 + +type Store struct { +} + +func (s Store) String() string { + return DBName +} + +func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + db := new(DB) + db.path = path + db.cfg = &cfg.RocksDB + + if err := db.open(); err != nil { + return nil, err + } + + return db, nil +} + +func (s Store) Repair(path string, cfg *config.Config) error { + db := new(DB) + db.path = path + db.cfg = &cfg.RocksDB + + err := db.open() + defer db.Close() + + //open ok, do not need repair + if err == nil { + return nil + } + + var errStr *C.char + ldbname := C.CString(path) + defer C.free(unsafe.Pointer(ldbname)) + + C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr) + if errStr != nil { + return saveError(errStr) + } + return nil +} + +type DB struct { + path string + + cfg *config.RocksDBConfig + + db *C.rocksdb_t + + env *Env + + opts *Options + blockOpts *BlockBasedTableOptions + + //for default read and write options + readOpts *ReadOptions + writeOpts *WriteOptions + iteratorOpts *ReadOptions + + syncOpts *WriteOptions + + cache *Cache + + filter *FilterPolicy +} + +func (db *DB) open() error { + db.initOptions(db.cfg) + + var errStr *C.char + ldbname := C.CString(db.path) + defer C.free(unsafe.Pointer(ldbname)) + + db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr) + if errStr != nil { + db.db = nil + return saveError(errStr) + } + return nil +} + +func (db *DB) initOptions(cfg *config.RocksDBConfig) { + opts := NewOptions() + blockOpts := NewBlockBasedTableOptions() + + opts.SetCreateIfMissing(true) + + db.env = NewDefaultEnv() + db.env.SetBackgroundThreads(cfg.BackgroundThreads) + db.env.SetHighPriorityBackgroundThreads(cfg.HighPriorityBackgroundThreads) + opts.SetEnv(db.env) + + db.cache = NewLRUCache(cfg.CacheSize) + blockOpts.SetCache(db.cache) + + //we must use bloomfilter + db.filter = NewBloomFilter(defaultFilterBits) + blockOpts.SetFilterPolicy(db.filter) + blockOpts.SetBlockSize(cfg.BlockSize) + opts.SetBlockBasedTableFactory(blockOpts) + + opts.SetCompression(CompressionOpt(cfg.Compression)) + opts.SetWriteBufferSize(cfg.WriteBufferSize) + opts.SetMaxOpenFiles(cfg.MaxOpenFiles) + opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions) + opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes) + opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger) + opts.SetLevel0SlowdownWritesTrigger(cfg.Level0SlowdownWritesTrigger) + opts.SetLevel0StopWritesTrigger(cfg.Level0StopWritesTrigger) + opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase) + opts.SetTargetFileSizeMultiplier(cfg.TargetFileSizeMultiplier) + opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase) + opts.SetMaxBytesForLevelMultiplier(cfg.MaxBytesForLevelMultiplier) + opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge) + opts.DisableAutoCompactions(cfg.DisableAutoCompactions) + 
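+	// the statistics, fsync and manifest settings below are likewise
+	// copied one-to-one from the RocksDB configuration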
opts.EnableStatistics(cfg.EnableStatistics) + opts.UseFsync(cfg.UseFsync) + opts.SetStatsDumpPeriodSec(cfg.StatsDumpPeriodSec) + opts.SetMaxManifestFileSize(cfg.MaxManifestFileSize) + + db.opts = opts + db.blockOpts = blockOpts + + db.readOpts = NewReadOptions() + db.writeOpts = NewWriteOptions() + db.writeOpts.DisableWAL(cfg.DisableWAL) + + db.syncOpts = NewWriteOptions() + db.syncOpts.SetSync(true) + db.syncOpts.DisableWAL(cfg.DisableWAL) + + db.iteratorOpts = NewReadOptions() + db.iteratorOpts.SetFillCache(false) +} + +func (db *DB) Close() error { + if db.db != nil { + C.rocksdb_close(db.db) + db.db = nil + } + + if db.filter != nil { + db.filter.Close() + } + + if db.cache != nil { + db.cache.Close() + } + + if db.env != nil { + db.env.Close() + } + + //db.blockOpts.Close() + + db.opts.Close() + + db.readOpts.Close() + db.writeOpts.Close() + db.iteratorOpts.Close() + + return nil +} + +func (db *DB) Put(key, value []byte) error { + return db.put(db.writeOpts, key, value) +} + +func (db *DB) Get(key []byte) ([]byte, error) { + return db.get(db.readOpts, key) +} + +func (db *DB) Delete(key []byte) error { + return db.delete(db.writeOpts, key) +} + +func (db *DB) SyncPut(key []byte, value []byte) error { + return db.put(db.syncOpts, key, value) +} + +func (db *DB) SyncDelete(key []byte) error { + return db.delete(db.syncOpts, key) +} + +func (db *DB) NewWriteBatch() driver.IWriteBatch { + wb := &WriteBatch{ + db: db, + wbatch: C.rocksdb_writebatch_create(), + } + + runtime.SetFinalizer(wb, func(w *WriteBatch) { + w.Close() + }) + + return wb +} + +func (db *DB) NewIterator() driver.IIterator { + it := new(Iterator) + + it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt) + + return it +} + +func (db *DB) NewSnapshot() (driver.ISnapshot, error) { + snap := &Snapshot{ + db: db, + snap: C.rocksdb_create_snapshot(db.db), + readOpts: NewReadOptions(), + iteratorOpts: NewReadOptions(), + } + snap.readOpts.SetSnapshot(snap) + snap.iteratorOpts.SetSnapshot(snap) + snap.iteratorOpts.SetFillCache(false) + + return snap, nil +} + +func (db *DB) put(wo *WriteOptions, key, value []byte) error { + var errStr *C.char + var k, v *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + if len(value) != 0 { + v = (*C.char)(unsafe.Pointer(&value[0])) + } + + lenk := len(key) + lenv := len(value) + C.rocksdb_put( + db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr) + + if errStr != nil { + return saveError(errStr) + } + return nil +} + +func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) { + var errStr *C.char + var vallen C.size_t + var k *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + value := C.rocksdb_get( + db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) + + if errStr != nil { + return nil, saveError(errStr) + } + + if value == nil { + return nil, nil + } + + defer C.free(unsafe.Pointer(value)) + return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil +} + +func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) { + var errStr *C.char + var vallen C.size_t + var k *C.char + if len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + value := C.rocksdb_get( + db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) + + if errStr != nil { + return nil, saveError(errStr) + } + + if value == nil { + return nil, nil + } + + return NewCSlice(unsafe.Pointer(value), int(vallen)), nil +} + +func (db *DB) delete(wo *WriteOptions, key []byte) error { + var errStr *C.char + var k *C.char + if 
len(key) != 0 { + k = (*C.char)(unsafe.Pointer(&key[0])) + } + + C.rocksdb_delete( + db.db, wo.Opt, k, C.size_t(len(key)), &errStr) + + if errStr != nil { + return saveError(errStr) + } + return nil +} + +func (db *DB) Compact() error { + C.rocksdb_compact_range(db.db, nil, 0, nil, 0) + return nil +} + +func (db *DB) GetSlice(key []byte) (driver.ISlice, error) { + return db.getSlice(db.readOpts, key) +} + +func init() { + driver.Register(Store{}) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go new file mode 100644 index 000000000000..e239c1b6c0e8 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go @@ -0,0 +1,27 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include "rocksdb/c.h" +import "C" + +type Env struct { + Env *C.rocksdb_env_t +} + +func NewDefaultEnv() *Env { + return &Env{C.rocksdb_create_default_env()} +} + +func (env *Env) SetHighPriorityBackgroundThreads(n int) { + C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n)) +} + +func (env *Env) SetBackgroundThreads(n int) { + C.rocksdb_env_set_background_threads(env.Env, C.int(n)) +} + +func (env *Env) Close() { + C.rocksdb_env_destroy(env.Env) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go new file mode 100644 index 000000000000..3be4ef6acac7 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go @@ -0,0 +1,21 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include +// #include "rocksdb/c.h" +import "C" + +type FilterPolicy struct { + Policy *C.rocksdb_filterpolicy_t +} + +func NewBloomFilter(bitsPerKey int) *FilterPolicy { + policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey)) + return &FilterPolicy{policy} +} + +func (fp *FilterPolicy) Close() { + C.rocksdb_filterpolicy_destroy(fp.Policy) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go new file mode 100644 index 000000000000..046c5e9dfaa4 --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go @@ -0,0 +1,70 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include +// #include "rocksdb/c.h" +// #include "rocksdb_ext.h" +import "C" + +import ( + "unsafe" +) + +type Iterator struct { + it *C.rocksdb_iterator_t + isValid C.uchar +} + +func (it *Iterator) Key() []byte { + var klen C.size_t + kdata := C.rocksdb_iter_key(it.it, &klen) + if kdata == nil { + return nil + } + + return slice(unsafe.Pointer(kdata), int(C.int(klen))) +} + +func (it *Iterator) Value() []byte { + var vlen C.size_t + vdata := C.rocksdb_iter_value(it.it, &vlen) + if vdata == nil { + return nil + } + + return slice(unsafe.Pointer(vdata), int(C.int(vlen))) +} + +func (it *Iterator) Close() error { + if it.it != nil { + C.rocksdb_iter_destroy(it.it) + it.it = nil + } + return nil +} + +func (it *Iterator) Valid() bool { + return ucharToBool(it.isValid) +} + +func (it *Iterator) Next() { + it.isValid = C.rocksdb_iter_next_ext(it.it) +} + +func (it *Iterator) Prev() { + it.isValid = C.rocksdb_iter_prev_ext(it.it) +} + +func (it *Iterator) First() { + it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it) +} + +func (it *Iterator) Last() { + it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it) +} + +func (it *Iterator) Seek(key []byte) { + 
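+	// the _ext wrapper (rocksdb_ext.cc) performs the seek and returns the
+	// validity flag in a single cgo call, so Valid() needs no extra crossing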
it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) +} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go new file mode 100644 index 000000000000..48ca2301a49a --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go @@ -0,0 +1,229 @@ +// +build rocksdb + +package rocksdb + +// #cgo LDFLAGS: -lrocksdb +// #include "rocksdb/c.h" +import "C" + +type CompressionOpt int + +const ( + NoCompression = CompressionOpt(0) + SnappyCompression = CompressionOpt(1) + ZlibCompression = CompressionOpt(2) + Bz2Compression = CompressionOpt(3) + Lz4Compression = CompressionOpt(4) + Lz4hcCompression = CompressionOpt(5) +) + +type Options struct { + Opt *C.rocksdb_options_t +} + +type ReadOptions struct { + Opt *C.rocksdb_readoptions_t +} + +type WriteOptions struct { + Opt *C.rocksdb_writeoptions_t +} + +type BlockBasedTableOptions struct { + Opt *C.rocksdb_block_based_table_options_t +} + +func NewOptions() *Options { + opt := C.rocksdb_options_create() + return &Options{opt} +} + +func NewReadOptions() *ReadOptions { + opt := C.rocksdb_readoptions_create() + return &ReadOptions{opt} +} + +func NewWriteOptions() *WriteOptions { + opt := C.rocksdb_writeoptions_create() + return &WriteOptions{opt} +} + +func NewBlockBasedTableOptions() *BlockBasedTableOptions { + opt := C.rocksdb_block_based_options_create() + return &BlockBasedTableOptions{opt} +} + +func (o *Options) Close() { + C.rocksdb_options_destroy(o.Opt) +} + +func (o *Options) IncreaseParallelism(n int) { + C.rocksdb_options_increase_parallelism(o.Opt, C.int(n)) +} + +func (o *Options) OptimizeLevelStyleCompaction(n int) { + C.rocksdb_options_optimize_level_style_compaction(o.Opt, C.uint64_t(n)) +} + +func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) { + C.rocksdb_options_set_comparator(o.Opt, cmp) +} + +func (o *Options) SetErrorIfExists(error_if_exists bool) { + eie := boolToUchar(error_if_exists) + C.rocksdb_options_set_error_if_exists(o.Opt, eie) +} + +func (o *Options) SetEnv(env *Env) { + C.rocksdb_options_set_env(o.Opt, env.Env) +} + +func (o *Options) SetWriteBufferSize(s int) { + C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s)) +} + +func (o *Options) SetParanoidChecks(pc bool) { + C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc)) +} + +func (o *Options) SetMaxOpenFiles(n int) { + C.rocksdb_options_set_max_open_files(o.Opt, C.int(n)) +} + +func (o *Options) SetCompression(t CompressionOpt) { + C.rocksdb_options_set_compression(o.Opt, C.int(t)) +} + +func (o *Options) SetCreateIfMissing(b bool) { + C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b)) +} + +func (o *Options) SetMaxWriteBufferNumber(n int) { + C.rocksdb_options_set_max_write_buffer_number(o.Opt, C.int(n)) +} + +func (o *Options) SetMaxBackgroundCompactions(n int) { + C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n)) +} + +func (o *Options) SetMaxBackgroundFlushes(n int) { + C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n)) +} + +func (o *Options) SetNumLevels(n int) { + C.rocksdb_options_set_num_levels(o.Opt, C.int(n)) +} + +func (o *Options) SetLevel0FileNumCompactionTrigger(n int) { + C.rocksdb_options_set_level0_file_num_compaction_trigger(o.Opt, C.int(n)) +} + +func (o *Options) SetLevel0SlowdownWritesTrigger(n int) { + C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n)) +} + +func (o *Options) SetLevel0StopWritesTrigger(n int) 
{
+	C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n))
+}
+
+func (o *Options) SetTargetFileSizeBase(n int) {
+	C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n)))
+}
+
+func (o *Options) SetTargetFileSizeMultiplier(n int) {
+	C.rocksdb_options_set_target_file_size_multiplier(o.Opt, C.int(n))
+}
+
+func (o *Options) SetMaxBytesForLevelBase(n int) {
+	C.rocksdb_options_set_max_bytes_for_level_base(o.Opt, C.uint64_t(uint64(n)))
+}
+
+func (o *Options) SetMaxBytesForLevelMultiplier(n int) {
+	C.rocksdb_options_set_max_bytes_for_level_multiplier(o.Opt, C.double(n))
+}
+
+func (o *Options) SetBlockBasedTableFactory(opt *BlockBasedTableOptions) {
+	C.rocksdb_options_set_block_based_table_factory(o.Opt, opt.Opt)
+}
+
+func (o *Options) SetMinWriteBufferNumberToMerge(n int) {
+	C.rocksdb_options_set_min_write_buffer_number_to_merge(o.Opt, C.int(n))
+}
+
+func (o *Options) DisableAutoCompactions(b bool) {
+	C.rocksdb_options_set_disable_auto_compactions(o.Opt, boolToInt(b))
+}
+
+func (o *Options) UseFsync(b bool) {
+	C.rocksdb_options_set_use_fsync(o.Opt, boolToInt(b))
+}
+
+func (o *Options) EnableStatistics(b bool) {
+	if b {
+		C.rocksdb_options_enable_statistics(o.Opt)
+	}
+}
+
+func (o *Options) SetStatsDumpPeriodSec(n int) {
+	C.rocksdb_options_set_stats_dump_period_sec(o.Opt, C.uint(n))
+}
+
+func (o *Options) SetMaxManifestFileSize(n int) {
+	C.rocksdb_options_set_max_manifest_file_size(o.Opt, C.size_t(n))
+}
+
+func (o *BlockBasedTableOptions) Close() {
+	C.rocksdb_block_based_options_destroy(o.Opt)
+}
+
+func (o *BlockBasedTableOptions) SetFilterPolicy(fp *FilterPolicy) {
+	var policy *C.rocksdb_filterpolicy_t
+	if fp != nil {
+		policy = fp.Policy
+	}
+	C.rocksdb_block_based_options_set_filter_policy(o.Opt, policy)
+}
+
+func (o *BlockBasedTableOptions) SetBlockSize(s int) {
+	C.rocksdb_block_based_options_set_block_size(o.Opt, C.size_t(s))
+}
+
+func (o *BlockBasedTableOptions) SetBlockRestartInterval(n int) {
+	C.rocksdb_block_based_options_set_block_restart_interval(o.Opt, C.int(n))
+}
+
+func (o *BlockBasedTableOptions) SetCache(cache *Cache) {
+	C.rocksdb_block_based_options_set_block_cache(o.Opt, cache.Cache)
+}
+
+func (ro *ReadOptions) Close() {
+	C.rocksdb_readoptions_destroy(ro.Opt)
+}
+
+func (ro *ReadOptions) SetVerifyChecksums(b bool) {
+	C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
+}
+
+func (ro *ReadOptions) SetFillCache(b bool) {
+	C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
+}
+
+func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
+	var s *C.rocksdb_snapshot_t
+	if snap != nil {
+		s = snap.snap
+	}
+	C.rocksdb_readoptions_set_snapshot(ro.Opt, s)
+}
+
+func (wo *WriteOptions) Close() {
+	C.rocksdb_writeoptions_destroy(wo.Opt)
+}
+
+func (wo *WriteOptions) SetSync(b bool) {
+	C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
+}
+
+func (wo *WriteOptions) DisableWAL(b bool) {
+	C.rocksdb_writeoptions_disable_WAL(wo.Opt, boolToInt(b))
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc
new file mode 100644
index 000000000000..39036ab96cc2
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc
@@ -0,0 +1,44 @@
+// +build rocksdb
+
+#include "rocksdb_ext.h"
+
+#include <stdlib.h>
+#include <string>
+
+extern "C" {
+
+unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) {
+	rocksdb_iter_seek_to_first(iter);
+	return rocksdb_iter_valid(iter);
+}
+
+unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) {
+	rocksdb_iter_seek_to_last(iter);
+	return rocksdb_iter_valid(iter);
+}
+
+unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) {
+	rocksdb_iter_seek(iter, k, klen);
+	return rocksdb_iter_valid(iter);
+}
+
+unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) {
+	rocksdb_iter_next(iter);
+	return rocksdb_iter_valid(iter);
+}
+
+unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) {
+	rocksdb_iter_prev(iter);
+	return rocksdb_iter_valid(iter);
+}
+
+void rocksdb_write_ext(rocksdb_t* db,
+	const rocksdb_writeoptions_t* options,
+	rocksdb_writebatch_t* batch, char** errptr) {
+	rocksdb_write(db, options, batch, errptr);
+	if(*errptr == NULL) {
+		rocksdb_writebatch_clear(batch);
+	}
+}
+
+}
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h
new file mode 100644
index 000000000000..11cb65304105
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h
@@ -0,0 +1,24 @@
+// +build rocksdb
+
+#ifndef ROCKSDB_EXT_H
+#define ROCKSDB_EXT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rocksdb/c.h"
+
+// Below iterator functions like rocksdb iterator but returns valid status for iterator
+extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*);
+extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*);
+extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen);
+extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*);
+extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*);
+extern void rocksdb_write_ext(rocksdb_t* db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, char** errptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go
new file mode 100644
index 000000000000..bbaa65bd7652
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go
@@ -0,0 +1,41 @@
+//+build rocksdb
+
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include <rocksdb/c.h>
+// #include <stdlib.h>
+import "C"
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type CSlice struct {
+	data unsafe.Pointer
+	size int
+}
+
+func NewCSlice(p unsafe.Pointer, n int) *CSlice {
+	return &CSlice{p, n}
+}
+
+func (s *CSlice) Data() []byte {
+	var value []byte
+
+	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
+	sH.Cap = int(s.size)
+	sH.Len = int(s.size)
+	sH.Data = uintptr(s.data)
+
+	return value
+}
+
+func (s *CSlice) Size() int {
+	return int(s.size)
+}
+
+func (s *CSlice) Free() {
+	C.free(s.data)
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go
new file mode 100644
index 000000000000..1ced60020027
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go
@@ -0,0 +1,39 @@
+// +build rocksdb
+
+package rocksdb
+
+// #cgo LDFLAGS: -lrocksdb
+// #include "rocksdb/c.h"
+import "C"
+
+import (
+	"github.com/siddontang/ledisdb/store/driver"
+)
+
+type Snapshot struct {
+	db           *DB
+	snap         *C.rocksdb_snapshot_t
+	readOpts     *ReadOptions
+	iteratorOpts *ReadOptions
+}
+
+func (s *Snapshot) Get(key []byte) ([]byte, error) {
+	return s.db.get(s.readOpts, key)
+}
+
+func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
+	return s.db.getSlice(s.readOpts, key)
+}
+
+func (s *Snapshot) NewIterator() driver.IIterator {
+	it := new(Iterator)
+	it.it = C.rocksdb_create_iterator(s.db.db, s.db.iteratorOpts.Opt)
+	return it
+
+}
+
+func (s *Snapshot) Close() {
+	C.rocksdb_release_snapshot(s.db.db, s.snap)
+	s.iteratorOpts.Close()
+	s.readOpts.Close()
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go
new file mode 100644
index 000000000000..22b73baf4ad4
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go
@@ -0,0 +1,54 @@
+// +build rocksdb
+
+package rocksdb
+
+// #include <stdlib.h>
+// #include "rocksdb/c.h"
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+func boolToUchar(b bool) C.uchar {
+	uc := C.uchar(0)
+	if b {
+		uc = C.uchar(1)
+	}
+	return uc
+}
+
+func ucharToBool(uc C.uchar) bool {
+	if uc == C.uchar(0) {
+		return false
+	}
+	return true
+}
+
+func boolToInt(b bool) C.int {
+	uc := C.int(0)
+	if b {
+		uc = C.int(1)
+	}
+	return uc
+}
+
+func saveError(errStr *C.char) error {
+	if errStr != nil {
+		gs := C.GoString(errStr)
+		C.free(unsafe.Pointer(errStr))
+		return fmt.Errorf(gs)
+	}
+	return nil
+}
+
+func slice(p unsafe.Pointer, n int) []byte {
+	var b []byte
+	pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	pbyte.Data = uintptr(p)
+	pbyte.Len = n
+	pbyte.Cap = n
+	return b
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/slice.go b/vendor/github.com/siddontang/ledisdb/store/slice.go
new file mode 100644
index 000000000000..b027f4f28b8e
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/slice.go
@@ -0,0 +1,9 @@
+package store
+
+import (
+	"github.com/siddontang/ledisdb/store/driver"
+)
+
+type Slice interface {
+	driver.ISlice
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/snapshot.go
new file mode 100644
index 000000000000..a1c9de9944e6
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/snapshot.go
@@ -0,0 +1,48 @@
+package store
+
+import (
+	"github.com/siddontang/ledisdb/store/driver"
+)
+
+type Snapshot struct {
+	driver.ISnapshot
+	st *Stat
+}
+
+func (s *Snapshot) NewIterator() *Iterator {
+	it := new(Iterator)
+	it.it = s.ISnapshot.NewIterator()
+	it.st = s.st
+
+	s.st.IterNum.Add(1)
+
+	return it
+}
+
+func (s *Snapshot) Get(key []byte) ([]byte, error) {
+	v, err := s.ISnapshot.Get(key)
+	s.st.statGet(v, err)
+	return v, err
+}
+
+func (s *Snapshot) GetSlice(key []byte) (Slice, error) {
+	if d, ok := s.ISnapshot.(driver.ISliceGeter); ok {
+		v, err := d.GetSlice(key)
+		s.st.statGet(v, err)
+		return v, err
+	} else {
+		v, err := s.Get(key)
+		if err != nil {
+			return nil, err
+		} else if v == nil {
+			return nil, nil
+		} else {
+			return driver.GoSlice(v), nil
+		}
+	}
+}
+
+func (s *Snapshot) Close() {
+	s.st.SnapshotCloseNum.Add(1)
+	s.ISnapshot.Close()
+}
diff --git a/vendor/github.com/siddontang/ledisdb/store/stat.go b/vendor/github.com/siddontang/ledisdb/store/stat.go
new file mode 100644
index 000000000000..e0a035ab8e18
--- /dev/null
+++ b/vendor/github.com/siddontang/ledisdb/store/stat.go
@@ -0,0 +1,37 @@
+package store
+
+import (
+	"github.com/siddontang/go/sync2"
+)
+
+type Stat struct {
+	GetNum               sync2.AtomicInt64
+	GetMissingNum        sync2.AtomicInt64
+	GetTotalTime         sync2.AtomicDuration
+	PutNum               sync2.AtomicInt64
+	DeleteNum            sync2.AtomicInt64
+	IterNum              sync2.AtomicInt64
+	IterSeekNum          sync2.AtomicInt64
+	IterCloseNum         sync2.AtomicInt64
+	SnapshotNum
sync2.AtomicInt64 + SnapshotCloseNum sync2.AtomicInt64 + BatchNum sync2.AtomicInt64 + BatchCommitNum sync2.AtomicInt64 + BatchCommitTotalTime sync2.AtomicDuration + TxNum sync2.AtomicInt64 + TxCommitNum sync2.AtomicInt64 + TxCloseNum sync2.AtomicInt64 + CompactNum sync2.AtomicInt64 + CompactTotalTime sync2.AtomicDuration +} + +func (st *Stat) statGet(v interface{}, err error) { + st.GetNum.Add(1) + if v == nil && err == nil { + st.GetMissingNum.Add(1) + } +} + +func (st *Stat) Reset() { + *st = Stat{} +} diff --git a/vendor/github.com/siddontang/ledisdb/store/store.go b/vendor/github.com/siddontang/ledisdb/store/store.go new file mode 100644 index 000000000000..1352491254dc --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/store.go @@ -0,0 +1,62 @@ +package store + +import ( + "fmt" + "os" + "path" + + "github.com/siddontang/ledisdb/config" + "github.com/siddontang/ledisdb/store/driver" + + _ "github.com/siddontang/ledisdb/store/goleveldb" + _ "github.com/siddontang/ledisdb/store/leveldb" + _ "github.com/siddontang/ledisdb/store/rocksdb" +) + +func getStorePath(cfg *config.Config) string { + if len(cfg.DBPath) > 0 { + return cfg.DBPath + } else { + return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName)) + } +} + +func Open(cfg *config.Config) (*DB, error) { + s, err := driver.GetStore(cfg) + if err != nil { + return nil, err + } + + path := getStorePath(cfg) + + if err := os.MkdirAll(path, 0755); err != nil { + return nil, err + } + + idb, err := s.Open(path, cfg) + if err != nil { + return nil, err + } + + db := new(DB) + db.db = idb + db.name = s.String() + db.st = &Stat{} + db.cfg = cfg + + return db, nil +} + +func Repair(cfg *config.Config) error { + s, err := driver.GetStore(cfg) + if err != nil { + return err + } + + path := getStorePath(cfg) + + return s.Repair(path, cfg) +} + +func init() { +} diff --git a/vendor/github.com/siddontang/ledisdb/store/writebatch.go b/vendor/github.com/siddontang/ledisdb/store/writebatch.go new file mode 100644 index 000000000000..73760d719c3c --- /dev/null +++ b/vendor/github.com/siddontang/ledisdb/store/writebatch.go @@ -0,0 +1,136 @@ +package store + +import ( + "time" + + "github.com/siddontang/ledisdb/store/driver" + "github.com/syndtr/goleveldb/leveldb" +) + +type WriteBatch struct { + wb driver.IWriteBatch + st *Stat + + putNum int64 + deleteNum int64 + db *DB + + data *BatchData +} + +func (wb *WriteBatch) Close() { + wb.wb.Close() +} + +func (wb *WriteBatch) Put(key []byte, value []byte) { + wb.putNum++ + wb.wb.Put(key, value) +} + +func (wb *WriteBatch) Delete(key []byte) { + wb.deleteNum++ + wb.wb.Delete(key) +} + +func (wb *WriteBatch) Commit() error { + wb.st.BatchCommitNum.Add(1) + wb.st.PutNum.Add(wb.putNum) + wb.st.DeleteNum.Add(wb.deleteNum) + wb.putNum = 0 + wb.deleteNum = 0 + + var err error + t := time.Now() + if wb.db == nil || !wb.db.needSyncCommit() { + err = wb.wb.Commit() + } else { + err = wb.wb.SyncCommit() + } + + wb.st.BatchCommitTotalTime.Add(time.Now().Sub(t)) + + return err +} + +func (wb *WriteBatch) Rollback() error { + wb.putNum = 0 + wb.deleteNum = 0 + + return wb.wb.Rollback() +} + +// the data will be undefined after commit or rollback +func (wb *WriteBatch) BatchData() *BatchData { + data := wb.wb.Data() + if wb.data == nil { + wb.data = new(BatchData) + } + + wb.data.Load(data) + return wb.data +} + +func (wb *WriteBatch) Data() []byte { + b := wb.BatchData() + return b.Data() +} + +/* + see leveldb batch data format for more information +*/ + +type BatchData struct { + leveldb.Batch +} + 
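+// Editor's sketch (not upstream code): BatchData exposes the raw
+// leveldb-format payload of a WriteBatch, so a batch can be serialized
+// and replayed. Assuming wb is a *WriteBatch obtained from a DB:
+//
+//	wb.Put([]byte("k"), []byte("v"))
+//	bd, err := NewBatchData(wb.Data())
+//	if err == nil {
+//		items, _ := bd.Items() // => one BatchItem{Key: "k", Value: "v"}
+//		_ = items
+//	}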
+func NewBatchData(data []byte) (*BatchData, error) { + b := new(BatchData) + + if err := b.Load(data); err != nil { + return nil, err + } + + return b, nil +} + +func (d *BatchData) Data() []byte { + return d.Dump() +} + +func (d *BatchData) Reset() { + d.Batch.Reset() +} + +type BatchDataReplay interface { + Put(key, value []byte) + Delete(key []byte) +} + +type BatchItem struct { + Key []byte + Value []byte +} + +type batchItems []BatchItem + +func (bs *batchItems) Put(key, value []byte) { + *bs = append(*bs, BatchItem{key, value}) +} + +func (bs *batchItems) Delete(key []byte) { + *bs = append(*bs, BatchItem{key, nil}) +} + +func (d *BatchData) Replay(r BatchDataReplay) error { + return d.Batch.Replay(r) +} + +func (d *BatchData) Items() ([]BatchItem, error) { + is := make(batchItems, 0, d.Len()) + + if err := d.Replay(&is); err != nil { + return nil, err + } + + return []BatchItem(is), nil +} diff --git a/vendor/github.com/siddontang/rdb/LICENSE b/vendor/github.com/siddontang/rdb/LICENSE new file mode 100644 index 000000000000..c16e3affbd3d --- /dev/null +++ b/vendor/github.com/siddontang/rdb/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 siddontang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/siddontang/rdb/decode.go b/vendor/github.com/siddontang/rdb/decode.go new file mode 100644 index 000000000000..865d24198120 --- /dev/null +++ b/vendor/github.com/siddontang/rdb/decode.go @@ -0,0 +1,128 @@ +package rdb + +// Copyright 2014 Wandoujia Inc. All Rights Reserved. +// Licensed under the MIT (MIT-LICENSE.txt) license. 
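+//
+// Editor's note (not upstream code): DecodeDump parses the payload of a
+// Redis DUMP/RESTORE value into one of the concrete types declared at the
+// bottom of this file (String, Hash, List, Set, ZSet), for example:
+//
+//	obj, err := DecodeDump(payload)
+//	if err == nil {
+//		if s, ok := obj.(String); ok {
+//			_ = []byte(s) // plain string value
+//		}
+//	}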
+ +import "fmt" + +import ( + "github.com/cupcake/rdb" + "github.com/cupcake/rdb/nopdecoder" +) + +func DecodeDump(p []byte) (interface{}, error) { + d := &decoder{} + if err := rdb.DecodeDump(p, 0, nil, 0, d); err != nil { + return nil, err + } + return d.obj, d.err +} + +type decoder struct { + nopdecoder.NopDecoder + obj interface{} + err error +} + +func (d *decoder) initObject(obj interface{}) { + if d.err != nil { + return + } + if d.obj != nil { + d.err = fmt.Errorf("invalid object, init again") + } else { + d.obj = obj + } +} + +func (d *decoder) Set(key, value []byte, expiry int64) { + d.initObject(String(value)) +} + +func (d *decoder) StartHash(key []byte, length, expiry int64) { + d.initObject(Hash(nil)) +} + +func (d *decoder) Hset(key, field, value []byte) { + if d.err != nil { + return + } + switch h := d.obj.(type) { + default: + d.err = fmt.Errorf("invalid object, not a hashmap") + case Hash: + v := struct { + Field, Value []byte + }{ + field, + value, + } + d.obj = append(h, v) + } +} + +func (d *decoder) StartSet(key []byte, cardinality, expiry int64) { + d.initObject(Set(nil)) +} + +func (d *decoder) Sadd(key, member []byte) { + if d.err != nil { + return + } + switch s := d.obj.(type) { + default: + d.err = fmt.Errorf("invalid object, not a set") + case Set: + d.obj = append(s, member) + } +} + +func (d *decoder) StartList(key []byte, length, expiry int64) { + d.initObject(List(nil)) +} + +func (d *decoder) Rpush(key, value []byte) { + if d.err != nil { + return + } + switch l := d.obj.(type) { + default: + d.err = fmt.Errorf("invalid object, not a list") + case List: + d.obj = append(l, value) + } +} + +func (d *decoder) StartZSet(key []byte, cardinality, expiry int64) { + d.initObject(ZSet(nil)) +} + +func (d *decoder) Zadd(key []byte, score float64, member []byte) { + if d.err != nil { + return + } + switch z := d.obj.(type) { + default: + d.err = fmt.Errorf("invalid object, not a zset") + case ZSet: + v := struct { + Member []byte + Score float64 + }{ + member, + score, + } + d.obj = append(z, v) + } +} + +type String []byte +type List [][]byte +type Hash []struct { + Field, Value []byte +} +type Set [][]byte +type ZSet []struct { + Member []byte + Score float64 +} diff --git a/vendor/github.com/siddontang/rdb/digest.go b/vendor/github.com/siddontang/rdb/digest.go new file mode 100644 index 000000000000..b59e4dfb7d23 --- /dev/null +++ b/vendor/github.com/siddontang/rdb/digest.go @@ -0,0 +1,106 @@ +// Copyright 2014 Wandoujia Inc. All Rights Reserved. +// Licensed under the MIT (MIT-LICENSE.txt) license. 
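+//
+// Editor's note (not upstream code): this file implements the CRC-64
+// checksum that Redis uses for RDB payloads, exposed within this package
+// via newDigest(), e.g.:
+//
+//	h := newDigest()
+//	h.Write(payload)
+//	crc := h.Sum64()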
+ +package rdb + +import ( + "encoding/binary" + "hash" +) + +var crc64_table = [256]uint64{ + 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, + 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, + 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, + 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, + 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, + 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, + 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, + 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, + 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, + 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, + 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, + 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, + 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, + 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, + 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, + 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, + 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, + 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, + 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, + 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, + 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, + 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, + 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, + 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, + 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, + 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, + 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, + 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, + 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, + 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, + 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, + 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, + 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, + 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, + 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, + 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, + 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, + 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, + 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, + 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, + 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, + 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, + 
0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, + 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, + 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, + 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, + 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, + 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, + 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, + 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, + 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, + 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, + 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, + 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, + 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, + 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, + 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, + 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, + 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, + 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, + 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, + 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, + 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, + 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728} + +type digest struct { + crc uint64 +} + +func (d *digest) update(p []byte) { + for _, b := range p { + d.crc = crc64_table[byte(d.crc)^b] ^ (d.crc >> 8) + } +} + +func newDigest() hash.Hash64 { + d := &digest{} + return d +} + +func (d *digest) Write(p []byte) (int, error) { + d.update(p) + return len(p), nil +} + +func (d *digest) Sum(in []byte) []byte { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, d.crc) + return append(in, buf...) 
+} + +func (d *digest) Sum64() uint64 { return d.crc } +func (d *digest) BlockSize() int { return 1 } +func (d *digest) Size() int { return 8 } +func (d *digest) Reset() { d.crc = 0 } diff --git a/vendor/github.com/siddontang/rdb/encode.go b/vendor/github.com/siddontang/rdb/encode.go new file mode 100644 index 000000000000..19c48c331fab --- /dev/null +++ b/vendor/github.com/siddontang/rdb/encode.go @@ -0,0 +1,52 @@ +package rdb + +import ( + "bytes" + "fmt" + "github.com/cupcake/rdb" +) + +func Dump(obj interface{}) ([]byte, error) { + var buf bytes.Buffer + + e := rdb.NewEncoder(&buf) + + switch v := obj.(type) { + case String: + e.EncodeType(rdb.TypeString) + e.EncodeString(v) + case Hash: + e.EncodeType(rdb.TypeHash) + e.EncodeLength(uint32(len(v))) + + for i := 0; i < len(v); i++ { + e.EncodeString(v[i].Field) + e.EncodeString(v[i].Value) + } + case List: + e.EncodeType(rdb.TypeList) + e.EncodeLength(uint32(len(v))) + for i := 0; i < len(v); i++ { + e.EncodeString(v[i]) + } + case Set: + e.EncodeType(rdb.TypeSet) + e.EncodeLength(uint32(len(v))) + for i := 0; i < len(v); i++ { + e.EncodeString(v[i]) + } + case ZSet: + e.EncodeType(rdb.TypeZSet) + e.EncodeLength(uint32(len(v))) + for i := 0; i < len(v); i++ { + e.EncodeString(v[i].Member) + e.EncodeFloat(v[i].Score) + } + default: + return nil, fmt.Errorf("invalid dump type %T", obj) + } + + e.EncodeDumpFooter() + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/siddontang/rdb/loader.go b/vendor/github.com/siddontang/rdb/loader.go new file mode 100644 index 000000000000..22743cbdebda --- /dev/null +++ b/vendor/github.com/siddontang/rdb/loader.go @@ -0,0 +1,112 @@ +// Copyright 2014 Wandoujia Inc. All Rights Reserved. +// Licensed under the MIT (MIT-LICENSE.txt) license. + +package rdb + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash" + "io" + "strconv" +) + +type Loader struct { + *rdbReader + crc hash.Hash64 + db uint32 +} + +func NewLoader(r io.Reader) *Loader { + l := &Loader{} + l.crc = newDigest() + l.rdbReader = newRdbReader(io.TeeReader(r, l.crc)) + return l +} + +func (l *Loader) LoadHeader() error { + header := make([]byte, 9) + if err := l.readFull(header); err != nil { + return err + } + if !bytes.Equal(header[:5], []byte("REDIS")) { + return fmt.Errorf("verify magic string, invalid file format") + } + if version, err := strconv.ParseInt(string(header[5:]), 10, 64); err != nil { + return err + } else if version <= 0 || version > Version { + return fmt.Errorf("verify version, invalid RDB version number %d", version) + } + return nil +} + +func (l *Loader) LoadChecksum() error { + crc1 := l.crc.Sum64() + if crc2, err := l.readUint64(); err != nil { + return err + } else if crc1 != crc2 { + return fmt.Errorf("checksum validation failed") + } + return nil +} + +type Entry struct { + DB uint32 + Key []byte + ValDump []byte + ExpireAt uint64 +} + +func (l *Loader) LoadEntry() (entry *Entry, err error) { + var expireat uint64 + for { + var otype byte + if otype, err = l.readByte(); err != nil { + return + } + switch otype { + case rdbFlagExpiryMS: + if expireat, err = l.readUint64(); err != nil { + return + } + case rdbFlagExpiry: + var sec uint32 + if sec, err = l.readUint32(); err != nil { + return + } + expireat = uint64(sec) * 1000 + case rdbFlagSelectDB: + if l.db, err = l.readLength(); err != nil { + return + } + case rdbFlagEOF: + return + default: + var key, obj []byte + if key, err = l.readString(); err != nil { + return + } + if obj, err = l.readObject(otype); err != nil { + return + } + entry = 
&Entry{} + entry.DB = l.db + entry.Key = key + entry.ValDump = createValDump(otype, obj) + entry.ExpireAt = expireat + return + } + } +} + +func createValDump(otype byte, obj []byte) []byte { + var b bytes.Buffer + c := newDigest() + w := io.MultiWriter(&b, c) + w.Write([]byte{otype}) + w.Write(obj) + binary.Write(w, binary.LittleEndian, uint16(Version)) + binary.Write(w, binary.LittleEndian, c.Sum64()) + return b.Bytes() +} diff --git a/vendor/github.com/siddontang/rdb/reader.go b/vendor/github.com/siddontang/rdb/reader.go new file mode 100644 index 000000000000..89ae9ed18121 --- /dev/null +++ b/vendor/github.com/siddontang/rdb/reader.go @@ -0,0 +1,332 @@ +// Copyright 2014 Wandoujia Inc. All Rights Reserved. +// Licensed under the MIT (MIT-LICENSE.txt) license. + +package rdb + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" + "strconv" +) + +const ( + Version = 6 +) + +const ( + rdbTypeString = 0 + rdbTypeList = 1 + rdbTypeSet = 2 + rdbTypeZSet = 3 + rdbTypeHash = 4 + + rdbTypeHashZipmap = 9 + rdbTypeListZiplist = 10 + rdbTypeSetIntset = 11 + rdbTypeZSetZiplist = 12 + rdbTypeHashZiplist = 13 + + rdbFlagExpiryMS = 0xfc + rdbFlagExpiry = 0xfd + rdbFlagSelectDB = 0xfe + rdbFlagEOF = 0xff +) + +const ( + rdb6bitLen = 0 + rdb14bitLen = 1 + rdb32bitLen = 2 + rdbEncVal = 3 + + rdbEncInt8 = 0 + rdbEncInt16 = 1 + rdbEncInt32 = 2 + rdbEncLZF = 3 + + rdbZiplist6bitlenString = 0 + rdbZiplist14bitlenString = 1 + rdbZiplist32bitlenString = 2 + + rdbZiplistInt16 = 0xc0 + rdbZiplistInt32 = 0xd0 + rdbZiplistInt64 = 0xe0 + rdbZiplistInt24 = 0xf0 + rdbZiplistInt8 = 0xfe + rdbZiplistInt4 = 15 +) + +type rdbReader struct { + raw io.Reader + buf [8]byte + nread int64 +} + +func newRdbReader(r io.Reader) *rdbReader { + return &rdbReader{raw: r} +} + +func (r *rdbReader) Read(p []byte) (int, error) { + n, err := r.raw.Read(p) + r.nread += int64(n) + return n, err +} + +func (r *rdbReader) offset() int64 { + return r.nread +} + +func (r *rdbReader) readObject(otype byte) ([]byte, error) { + var b bytes.Buffer + r = newRdbReader(io.TeeReader(r, &b)) + switch otype { + default: + return nil, fmt.Errorf("unknown object-type %02x", otype) + case rdbTypeHashZipmap: + fallthrough + case rdbTypeListZiplist: + fallthrough + case rdbTypeSetIntset: + fallthrough + case rdbTypeZSetZiplist: + fallthrough + case rdbTypeHashZiplist: + fallthrough + case rdbTypeString: + if _, err := r.readString(); err != nil { + return nil, err + } + case rdbTypeList, rdbTypeSet: + if n, err := r.readLength(); err != nil { + return nil, err + } else { + for i := 0; i < int(n); i++ { + if _, err := r.readString(); err != nil { + return nil, err + } + } + } + case rdbTypeZSet: + if n, err := r.readLength(); err != nil { + return nil, err + } else { + for i := 0; i < int(n); i++ { + if _, err := r.readString(); err != nil { + return nil, err + } + if _, err := r.readFloat(); err != nil { + return nil, err + } + } + } + case rdbTypeHash: + if n, err := r.readLength(); err != nil { + return nil, err + } else { + for i := 0; i < int(n); i++ { + if _, err := r.readString(); err != nil { + return nil, err + } + if _, err := r.readString(); err != nil { + return nil, err + } + } + } + } + return b.Bytes(), nil +} + +func (r *rdbReader) readString() ([]byte, error) { + length, encoded, err := r.readEncodedLength() + if err != nil { + return nil, err + } + if !encoded { + return r.readBytes(int(length)) + } + switch t := uint8(length); t { + default: + return nil, fmt.Errorf("invalid encoded-string %02x", t) + case rdbEncInt8: + i, err 
:= r.readInt8() + return []byte(strconv.FormatInt(int64(i), 10)), err + case rdbEncInt16: + i, err := r.readInt16() + return []byte(strconv.FormatInt(int64(i), 10)), err + case rdbEncInt32: + i, err := r.readInt32() + return []byte(strconv.FormatInt(int64(i), 10)), err + case rdbEncLZF: + var inlen, outlen uint32 + if inlen, err = r.readLength(); err != nil { + return nil, err + } + if outlen, err = r.readLength(); err != nil { + return nil, err + } + if in, err := r.readBytes(int(inlen)); err != nil { + return nil, err + } else { + return lzfDecompress(in, int(outlen)) + } + } +} + +func (r *rdbReader) readEncodedLength() (length uint32, encoded bool, err error) { + var u uint8 + if u, err = r.readUint8(); err != nil { + return + } + length = uint32(u & 0x3f) + switch u >> 6 { + case rdb6bitLen: + case rdb14bitLen: + u, err = r.readUint8() + length = (length << 8) + uint32(u) + case rdbEncVal: + encoded = true + default: + length, err = r.readUint32BigEndian() + } + return +} + +func (r *rdbReader) readLength() (uint32, error) { + length, encoded, err := r.readEncodedLength() + if err == nil && encoded { + err = fmt.Errorf("encoded-length") + } + return length, err +} + +func (r *rdbReader) readFloat() (float64, error) { + u, err := r.readUint8() + if err != nil { + return 0, err + } + switch u { + case 253: + return math.NaN(), nil + case 254: + return math.Inf(0), nil + case 255: + return math.Inf(-1), nil + default: + if b, err := r.readBytes(int(u)); err != nil { + return 0, err + } else { + v, err := strconv.ParseFloat(string(b), 64) + return v, err + } + } +} + +func (r *rdbReader) readByte() (byte, error) { + b := r.buf[:1] + _, err := r.Read(b) + return b[0], err +} + +func (r *rdbReader) readFull(p []byte) error { + _, err := io.ReadFull(r, p) + return err +} + +func (r *rdbReader) readBytes(n int) ([]byte, error) { + p := make([]byte, n) + return p, r.readFull(p) +} + +func (r *rdbReader) readUint8() (uint8, error) { + b, err := r.readByte() + return uint8(b), err +} + +func (r *rdbReader) readUint16() (uint16, error) { + b := r.buf[:2] + err := r.readFull(b) + return binary.LittleEndian.Uint16(b), err +} + +func (r *rdbReader) readUint32() (uint32, error) { + b := r.buf[:4] + err := r.readFull(b) + return binary.LittleEndian.Uint32(b), err +} + +func (r *rdbReader) readUint64() (uint64, error) { + b := r.buf[:8] + err := r.readFull(b) + return binary.LittleEndian.Uint64(b), err +} + +func (r *rdbReader) readUint32BigEndian() (uint32, error) { + b := r.buf[:4] + err := r.readFull(b) + return binary.BigEndian.Uint32(b), err +} + +func (r *rdbReader) readInt8() (int8, error) { + u, err := r.readUint8() + return int8(u), err +} + +func (r *rdbReader) readInt16() (int16, error) { + u, err := r.readUint16() + return int16(u), err +} + +func (r *rdbReader) readInt32() (int32, error) { + u, err := r.readUint32() + return int32(u), err +} + +func (r *rdbReader) readInt64() (int64, error) { + u, err := r.readUint64() + return int64(u), err +} + +func (r *rdbReader) readInt32BigEndian() (int32, error) { + u, err := r.readUint32BigEndian() + return int32(u), err +} + +func lzfDecompress(in []byte, outlen int) (out []byte, err error) { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("decompress exception: %v", x) + } + }() + out = make([]byte, outlen) + i, o := 0, 0 + for i < len(in) { + ctrl := int(in[i]) + i++ + if ctrl < 32 { + for x := 0; x <= ctrl; x++ { + out[o] = in[i] + i++ + o++ + } + } else { + length := ctrl >> 5 + if length == 7 { + length = length + 
int(in[i])
+			i++
+		}
+		ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1
+		i++
+		for x := 0; x <= length+1; x++ {
+			out[o] = out[ref]
+			ref++
+			o++
+		}
+		}
+	}
+	if o != outlen {
+		return nil, fmt.Errorf("decompress length is %d != expected %d", o, outlen)
+	}
+	return out, nil
+}

From a4ad4af4826871267a2a686cd2ab6b8043bea0e1 Mon Sep 17 00:00:00 2001
From: Lunny Xiao
Date: Wed, 19 Dec 2018 10:09:43 +0800
Subject: [PATCH 06/14] add config for issue indexer type

---
 models/issue_indexer.go    | 20 +++++++++++++-------
 modules/setting/setting.go |  2 ++
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/models/issue_indexer.go b/models/issue_indexer.go
index 15d821c8b010..cb2acc6433c6 100644
--- a/models/issue_indexer.go
+++ b/models/issue_indexer.go
@@ -21,15 +21,21 @@ var (
 
 // InitIssueIndexer initialize issue indexer
 func InitIssueIndexer() error {
-	issueIndexer = issues.NewBleveIndexer(setting.Indexer.IssuePath)
-	exist, err := issueIndexer.Init()
-	if err != nil {
-		return err
-	}
-
-	if !exist {
-		go populateIssueIndexer()
+	switch setting.Indexer.IssueType {
+	case "bleve":
+		issueIndexer = issues.NewBleveIndexer(setting.Indexer.IssuePath)
+		exist, err := issueIndexer.Init()
+		if err != nil {
+			return err
+		}
+		if !exist {
+			go populateIssueIndexer()
+		}
+	default:
+		return fmt.Errorf("unknown issue indexer type: %s", setting.Indexer.IssueType)
 	}
 
+	var err error
 	switch setting.Indexer.IssueIndexerQueueType {
 	case setting.LedisLocalQueueType:
 		issueIndexerUpdateQueue, err = issues.NewLedisLocalQueue(
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index de881b25805e..c9b599eb0baa 100644
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -187,6 +187,7 @@ var (
 
 	// Indexer settings
 	Indexer = struct {
+		IssueType                    string
 		IssuePath                    string
 		RepoIndexerEnabled           bool
 		RepoPath                     string
@@ -197,6 +198,7 @@
 		IssueIndexerQueueDBIndex     int
 		IssueIndexerQueueBatchNumber int
 	}{
+		IssueType:                 "bleve",
 		IssuePath:                 "indexers/issues.bleve",
 		IssueIndexerQueueType:     LedisLocalQueueType,
 		IssueIndexerQueueDir:      "indexers/issues.queue",

From 37d972227ca65b588468b80361a0c812c76e34f8 Mon Sep 17 00:00:00 2001
From: Lunny Xiao
Date: Wed, 19 Dec 2018 10:44:10 +0800
Subject: [PATCH 07/14] fix bugs on issue indexer

---
 modules/indexer/issues/bleve.go | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go
index b40274f96eff..959aae5d2da2 100644
--- a/modules/indexer/issues/bleve.go
+++ b/modules/indexer/issues/bleve.go
@@ -96,6 +96,7 @@ func openIndexer(path string, latestVersion int) (bleve.Index, error) {
 	} else if err != nil {
 		return nil, err
 	}
+
 	return index, nil
 }
 
@@ -108,7 +109,7 @@ func (i *BleveIndexerData) Type() string {
 }
 
 // createIssueIndexer create an issue indexer if one does not already exist
-func createIssueIndexer(path string) (bleve.Index, error) {
+func createIssueIndexer(path string, latestVersion int) (bleve.Index, error) {
 	mapping := bleve.NewIndexMapping()
 	docMapping := bleve.NewDocumentMapping()
@@ -138,7 +139,17 @@ func createIssueIndexer(path string) (bleve.Index, error) {
 	mapping.AddDocumentMapping(issueIndexerDocType, docMapping)
 	mapping.AddDocumentMapping("_all", bleve.NewDocumentDisabledMapping())
 
-	return bleve.New(path, mapping)
+	index, err := bleve.New(path, mapping)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = rupture.WriteIndexMetadata(path, &rupture.IndexMetadata{
+		Version: latestVersion,
+	}); err != nil {
+		return nil, err
+	}
+	return index, nil
 }
 
 var (
@@ -168,7 +179,7 @@ func (b *BleveIndexer) Init() (bool, error) {
 		return true, nil
 	}
 
-	b.indexer, err = createIssueIndexer(b.indexDir)
+	b.indexer, err = createIssueIndexer(b.indexDir, issueIndexerLatestVersion)
 	return false, err
 }

From 4ce88810175449e9e0a4a70056af23b37ed503ca Mon Sep 17 00:00:00 2001
From: Lunny Xiao
Date: Wed, 19 Dec 2018 11:43:03 +0800
Subject: [PATCH 08/14] fix lint

---
 modules/indexer/issues/bleve.go             | 4 +++-
 modules/indexer/issues/indexer.go           | 3 ++-
 modules/indexer/issues/queue_channel.go     | 2 ++
 modules/indexer/issues/queue_ledis_local.go | 8 +++++---
 4 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go
index 959aae5d2da2..d04e21b5b29b 100644
--- a/modules/indexer/issues/bleve.go
+++ b/modules/indexer/issues/bleve.go
@@ -100,7 +100,7 @@ func openIndexer(path string, latestVersion int) (bleve.Index, error) {
 	return index, nil
 }
 
-// IssueIndexerUpdate an update to the issue indexer
+// BleveIndexerData an update to the issue indexer
 type BleveIndexerData IndexerData
 
 // Type returns the document type, for bleve's mapping.Classifier interface.
@@ -169,6 +169,7 @@ func NewBleveIndexer(indexDir string) *BleveIndexer {
 	}
 }
 
+// Init will initialize the indexer
 func (b *BleveIndexer) Init() (bool, error) {
 	var err error
 	b.indexer, err = openIndexer(b.indexDir, issueIndexerLatestVersion)
@@ -183,6 +184,7 @@ func (b *BleveIndexer) Init() (bool, error) {
 	return false, err
 }
 
+// Index will save the index data
 func (b *BleveIndexer) Index(issues []*IndexerData) error {
 	batch := rupture.NewFlushingBatch(b.indexer, maxBatchSize)
 	for _, issue := range issues {
diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go
index 7954197d15f3..7bc770f465d7 100644
--- a/modules/indexer/issues/indexer.go
+++ b/modules/indexer/issues/indexer.go
@@ -14,13 +14,14 @@ type IndexerData struct {
 	IsDelete bool `json:"-"`
 }
 
-// Match
+// Match represents one search result
 type Match struct {
 	ID     int64   `json:"id"`
 	RepoID int64   `json:"repo_id"`
 	Score  float64 `json:"score"`
 }
 
+// SearchResult represents search results
 type SearchResult struct {
 	Hits []Match
 }
diff --git a/modules/indexer/issues/queue_channel.go b/modules/indexer/issues/queue_channel.go
index 5b39e199a605..99a90ad49915 100644
--- a/modules/indexer/issues/queue_channel.go
+++ b/modules/indexer/issues/queue_channel.go
@@ -26,6 +26,7 @@ func NewChannelQueue(indexer Indexer, batchNumber int) *ChannelQueue {
 	}
 }
 
+// Run starts to run the queue
 func (c *ChannelQueue) Run() error {
 	var i int
 	var datas = make([]*IndexerData, 0, c.batchNumber)
@@ -49,6 +50,7 @@ func (c *ChannelQueue) Run() error {
 	}
 }
 
+// Push will push the indexer data to queue
 func (c *ChannelQueue) Push(data *IndexerData) {
 	c.queue <- data
 }
diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_ledis_local.go
index 1ebeda75e37a..5e5d3216f511 100644
--- a/modules/indexer/issues/queue_ledis_local.go
+++ b/modules/indexer/issues/queue_ledis_local.go
@@ -16,7 +16,7 @@ import (
 
 var (
 	_ Queue = &LedisLocalQueue{}
 
-	ledis_local_key = []byte("ledis_local_key")
+	ledisLocalKey = []byte("ledis_local_key")
 )
 
 // LedisLocalQueue implements a ledis as a disk library queue
@@ -49,11 +49,12 @@ func NewLedisLocalQueue(indexer Indexer, dataDir string, dbIdx, batchNumber int)
 	}, nil
 }
 
+// Run starts to run the queue
 func (l *LedisLocalQueue) Run() error {
 	var i int
 	var datas = make([]*IndexerData, 0, l.batchNumber)
 	for {
-		bs, err :=
l.db.RPop(ledis_local_key) + bs, err := l.db.RPop(ledisLocalKey) if err != nil { log.Error(4, "RPop: %v", err) time.Sleep(time.Millisecond * 100) @@ -87,13 +88,14 @@ func (l *LedisLocalQueue) Run() error { } } +// Push will push the indexer data to queue func (l *LedisLocalQueue) Push(data *IndexerData) { bs, err := json.Marshal(data) if err != nil { log.Error(4, "Marshal: %v", err) return } - _, err = l.db.LPush(ledis_local_key, bs) + _, err = l.db.LPush(ledisLocalKey, bs) if err != nil { log.Error(4, "LPush: %v", err) } From 881cd486e553d9eaa388b879d21c01de2d38377f Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sat, 12 Jan 2019 10:54:31 +0800 Subject: [PATCH 09/14] refactor the for loop --- models/issue_indexer.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/models/issue_indexer.go b/models/issue_indexer.go index cb2acc6433c6..3e4858537693 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -59,8 +59,7 @@ func InitIssueIndexer() error { // populateIssueIndexer populate the issue indexer with issue data func populateIssueIndexer() { - page := 1 - for { + for page := 1; ; page++ { repos, _, err := SearchRepositoryByName(&SearchRepoOptions{ Page: page, PageSize: RepositoryListDefaultPageSize, @@ -75,7 +74,7 @@ func populateIssueIndexer() { if len(repos) == 0 { return } - page++ + for _, repo := range repos { is, err := Issues(&IssuesOptions{ RepoIDs: []int64{repo.ID}, From 4258f9d7b243d158879da706566e5320d1297a56 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sat, 12 Jan 2019 11:01:02 +0800 Subject: [PATCH 10/14] reduce function signature change --- models/issue_indexer.go | 2 +- routers/api/v1/repo/issue.go | 2 +- routers/repo/issue.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 3e4858537693..5f9a49af7357 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -122,7 +122,7 @@ func DeleteRepoIssueIndexer(repo *Repository) { } // SearchIssuesByKeyword search issue ids by keywords and repo id -func SearchIssuesByKeyword(keyword string, repoID int64) ([]int64, error) { +func SearchIssuesByKeyword(repoID int64, keyword string) ([]int64, error) { var issueIDs []int64 res, err := issueIndexer.Search(keyword, repoID, 1000, 0) if err != nil { diff --git a/routers/api/v1/repo/issue.go b/routers/api/v1/repo/issue.go index 0a5d4032c0a5..b13af335486f 100644 --- a/routers/api/v1/repo/issue.go +++ b/routers/api/v1/repo/issue.go @@ -77,7 +77,7 @@ func ListIssues(ctx *context.APIContext) { var labelIDs []int64 var err error if len(keyword) > 0 { - issueIDs, err = models.SearchIssuesByKeyword(keyword, ctx.Repo.Repository.ID) + issueIDs, err = models.SearchIssuesByKeyword(ctx.Repo.Repository.ID, keyword) } if splitted := strings.Split(ctx.Query("labels"), ","); len(splitted) > 0 { diff --git a/routers/repo/issue.go b/routers/repo/issue.go index 19fcce5e0f1e..bef527eeb1a0 100644 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -129,7 +129,7 @@ func issues(ctx *context.Context, milestoneID int64, isPullOption util.OptionalB var issueIDs []int64 if len(keyword) > 0 { - issueIDs, err = models.SearchIssuesByKeyword(keyword, repo.ID) + issueIDs, err = models.SearchIssuesByKeyword(repo.ID, keyword) if err != nil { ctx.ServerError("issueIndexer.Search", err) return From 9a63ecbe0aebd69e73a597de64341dd39cb57502 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Fri, 18 Jan 2019 20:02:46 +0800 Subject: [PATCH 11/14] fix some bugs --- models/issue.go | 17 ++++++- 
models/issue_comment.go | 14 ------ models/issue_indexer.go | 13 +++++- modules/indexer/issues/bleve.go | 11 +++++ modules/indexer/issues/indexer.go | 4 +- modules/indexer/issues/queue_ledis_local.go | 14 ++++++ modules/notification/base/notifier.go | 4 +- modules/notification/base/null.go | 4 +- modules/notification/indexer/indexer.go | 50 ++++++++++++++++++++- modules/notification/notification.go | 8 ++-- routers/api/v1/repo/issue_comment.go | 4 +- routers/repo/issue.go | 4 +- 12 files changed, 116 insertions(+), 31 deletions(-) diff --git a/models/issue.go b/models/issue.go index a78bddd0dfc4..fde3f7051aab 100644 --- a/models/issue.go +++ b/models/issue.go @@ -179,12 +179,21 @@ func (issue *Issue) LoadPullRequest() error { } func (issue *Issue) loadComments(e Engine) (err error) { + return issue.loadCommentsByType(e, CommentTypeUnknown) +} + +// LoadDiscussComments loads discuss comments +func (issue *Issue) LoadDiscussComments() error { + return issue.loadCommentsByType(x, CommentTypeComment) +} + +func (issue *Issue) loadCommentsByType(e Engine, tp CommentType) (err error) { if issue.Comments != nil { return nil } issue.Comments, err = findComments(e, FindCommentsOptions{ IssueID: issue.ID, - Type: CommentTypeUnknown, + Type: tp, }) return err } @@ -1212,6 +1221,12 @@ func getIssuesByIDs(e Engine, issueIDs []int64) ([]*Issue, error) { return issues, e.In("id", issueIDs).Find(&issues) } +func getIssueIDsByRepoID(e Engine, repoID int64) ([]int64, error) { + var ids = make([]int64, 0, 10) + err := e.Table("issue").Where("repo_id = ?", repoID).Find(&ids) + return ids, err +} + // GetIssuesByIDs return issues with the given IDs. func GetIssuesByIDs(issueIDs []int64) ([]*Issue, error) { return getIssuesByIDs(x, issueIDs) diff --git a/models/issue_comment.go b/models/issue_comment.go index 161ef06521a0..cbcae9792d6a 100644 --- a/models/issue_comment.go +++ b/models/issue_comment.go @@ -1032,13 +1032,6 @@ func UpdateComment(doer *User, c *Comment, oldContent string) error { return err } - if c.Type == CommentTypeComment { - if err := c.Issue.loadComments(x); err != nil { - return err - } - UpdateIssueIndexer(c.Issue) - } - if err := c.Issue.LoadAttributes(); err != nil { return err } @@ -1098,13 +1091,6 @@ func DeleteComment(doer *User, comment *Comment) error { return err } - if comment.Type == CommentTypeComment { - if err := comment.Issue.loadComments(x); err != nil { - return err - } - UpdateIssueIndexer(comment.Issue) - } - if err := comment.Issue.LoadAttributes(); err != nil { return err } diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 5f9a49af7357..227dd6ce3e85 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -115,8 +115,19 @@ func UpdateIssueIndexer(issue *Issue) { // DeleteRepoIssueIndexer deletes repo's all issues indexes func DeleteRepoIssueIndexer(repo *Repository) { + var ids []int64 + ids, err := getIssueIDsByRepoID(x, repo.ID) + if err != nil { + log.Error(4, "getIssueIDsByRepoID failed: %v", err) + return + } + + if len(ids) <= 0 { + return + } + issueIndexerUpdateQueue.Push(&issues.IndexerData{ - RepoID: repo.ID, + IDs: ids, IsDelete: true, }) } diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go index d04e21b5b29b..36279198b86b 100644 --- a/modules/indexer/issues/bleve.go +++ b/modules/indexer/issues/bleve.go @@ -205,6 +205,17 @@ func (b *BleveIndexer) Index(issues []*IndexerData) error { return batch.Flush() } +// Delete deletes indexes by ids +func (b *BleveIndexer) Delete(ids ...int64) error { + batch 
:= rupture.NewFlushingBatch(b.indexer, maxBatchSize) + for _, id := range ids { + if err := batch.Delete(indexerID(id)); err != nil { + return err + } + } + return batch.Flush() +} + // Search searches for issues by given conditions. // Returns the matching issue IDs func (b *BleveIndexer) Search(keyword string, repoID int64, limit, start int) (*SearchResult, error) { diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index 7bc770f465d7..c31006d0dd76 100644 --- a/modules/indexer/issues/indexer.go +++ b/modules/indexer/issues/indexer.go @@ -11,7 +11,8 @@ type IndexerData struct { Title string Content string Comments []string - IsDelete bool `json:"-"` + IsDelete bool + IDs []int64 } // Match represents on search result @@ -30,5 +31,6 @@ type SearchResult struct { type Indexer interface { Init() (bool, error) Index(issue []*IndexerData) error + Delete(ids ...int64) error Search(kw string, repoID int64, limit, start int) (*SearchResult, error) } diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_ledis_local.go index 5e5d3216f511..854e2df60ce0 100644 --- a/modules/indexer/issues/queue_ledis_local.go +++ b/modules/indexer/issues/queue_ledis_local.go @@ -83,6 +83,20 @@ func (l *LedisLocalQueue) Run() error { log.Trace("LedisLocalQueue: task found: %#v", data) + if data.IsDelete { + if data.ID > 0 { + if err = l.indexer.Delete(data.ID); err != nil { + log.Error(4, "indexer.Delete: %v", err) + } + } else if len(data.IDs) > 0 { + if err = l.indexer.Delete(data.IDs...); err != nil { + log.Error(4, "indexer.Delete: %v", err) + } + } + time.Sleep(time.Millisecond * 10) + continue + } + datas = append(datas, &data) time.Sleep(time.Millisecond * 10) } diff --git a/modules/notification/base/notifier.go b/modules/notification/base/notifier.go index 2e127293c4be..bac90f5bb1d2 100644 --- a/modules/notification/base/notifier.go +++ b/modules/notification/base/notifier.go @@ -34,8 +34,8 @@ type Notifier interface { NotifyCreateIssueComment(*models.User, *models.Repository, *models.Issue, *models.Comment) - NotifyUpdateComment(*models.User, *models.Comment, int64, string) - NotifyDeleteComment(*models.User, *models.Comment, int64) + NotifyUpdateComment(*models.User, *models.Comment, string) + NotifyDeleteComment(*models.User, *models.Comment) NotifyNewRelease(rel *models.Release) NotifyUpdateRelease(doer *models.User, rel *models.Release) diff --git a/modules/notification/base/null.go b/modules/notification/base/null.go index 33c48c077c95..608bd0dcaae7 100644 --- a/modules/notification/base/null.go +++ b/modules/notification/base/null.go @@ -47,11 +47,11 @@ func (*NullNotifier) NotifyMergePullRequest(pr *models.PullRequest, doer *models } // NotifyUpdateComment places a place holder function -func (*NullNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { +func (*NullNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { } // NotifyDeleteComment places a place holder function -func (*NullNotifier) NotifyDeleteComment(doer *models.User, c *models.Comment, repoID int64) { +func (*NullNotifier) NotifyDeleteComment(doer *models.User, c *models.Comment) { } // NotifyDeleteRepository places a place holder function diff --git a/modules/notification/indexer/indexer.go b/modules/notification/indexer/indexer.go index 561b713bac45..66d483c0173c 100644 --- a/modules/notification/indexer/indexer.go +++ b/modules/notification/indexer/indexer.go @@ -6,6 +6,7 @@ package 
indexer import ( "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/notification/base" ) @@ -25,6 +26,15 @@ func NewNotifier() base.Notifier { func (r *indexerNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.Repository, issue *models.Issue, comment *models.Comment) { if comment.Type == models.CommentTypeComment { + if issue.Comments == nil { + if err := issue.LoadDiscussComments(); err != nil { + log.Error(4, "LoadComments failed: %v", err) + return + } + } else { + issue.Comments = append(issue.Comments, comment) + } + models.UpdateIssueIndexer(issue) } } @@ -37,14 +47,50 @@ func (r *indexerNotifier) NotifyNewPullRequest(pr *models.PullRequest) { models.UpdateIssueIndexer(pr.Issue) } -func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { +func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { if c.Type == models.CommentTypeComment { + var found bool + if c.Issue.Comments != nil { + for i := 0; i < len(c.Issue.Comments); i++ { + if c.Issue.Comments[i].ID == c.ID { + c.Issue.Comments[i] = c + found = true + break + } + } + } + + if !found { + if err := c.Issue.LoadDiscussComments(); err != nil { + log.Error(4, "LoadComments failed: %v", err) + return + } + } + models.UpdateIssueIndexer(c.Issue) } } -func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models.Comment, repoID int64) { +func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models.Comment) { if comment.Type == models.CommentTypeComment { + var found bool + if comment.Issue.Comments != nil { + for i := 0; i < len(comment.Issue.Comments); i++ { + if comment.Issue.Comments[i].ID == comment.ID { + comment.Issue.Comments = append(comment.Issue.Comments[:i], comment.Issue.Comments[i+1:]...) 
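+					// editor's note: drop the deleted comment from the
+					// in-memory list so the reindex below sees the issue
+					// without it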
+ found = true + break + } + } + } + + if !found { + if err := comment.Issue.LoadDiscussComments(); err != nil { + log.Error(4, "LoadComments failed: %v", err) + return + } + } + // reload comments to delete the old comment models.UpdateIssueIndexer(comment.Issue) } } diff --git a/modules/notification/notification.go b/modules/notification/notification.go index f0e160385eb9..e38c36f7dd1e 100644 --- a/modules/notification/notification.go +++ b/modules/notification/notification.go @@ -73,16 +73,16 @@ func NotifyPullRequestReview(pr *models.PullRequest, review *models.Review, comm } // NotifyUpdateComment notifies update comment to notifiers -func NotifyUpdateComment(doer *models.User, c *models.Comment, repoID int64, oldContent string) { +func NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { for _, notifier := range notifiers { - notifier.NotifyUpdateComment(doer, c, repoID, oldContent) + notifier.NotifyUpdateComment(doer, c, oldContent) } } // NotifyDeleteComment notifies delete comment to notifiers -func NotifyDeleteComment(doer *models.User, c *models.Comment, repoID int64) { +func NotifyDeleteComment(doer *models.User, c *models.Comment) { for _, notifier := range notifiers { - notifier.NotifyDeleteComment(doer, c, repoID) + notifier.NotifyDeleteComment(doer, c) } } diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go index 930157281952..720513f00720 100644 --- a/routers/api/v1/repo/issue_comment.go +++ b/routers/api/v1/repo/issue_comment.go @@ -284,7 +284,7 @@ func editIssueComment(ctx *context.APIContext, form api.EditIssueCommentOption) return } - notification.NotifyUpdateComment(ctx.User, comment, ctx.Repo.Repository.ID, oldContent) + notification.NotifyUpdateComment(ctx.User, comment, oldContent) ctx.JSON(200, comment.APIFormat()) } @@ -375,7 +375,7 @@ func deleteIssueComment(ctx *context.APIContext) { return } - notification.NotifyDeleteComment(ctx.User, comment, ctx.Repo.Repository.ID) + notification.NotifyDeleteComment(ctx.User, comment) ctx.Status(204) } diff --git a/routers/repo/issue.go b/routers/repo/issue.go index bef527eeb1a0..7dc355acd0a3 100644 --- a/routers/repo/issue.go +++ b/routers/repo/issue.go @@ -1259,7 +1259,7 @@ func UpdateCommentContent(ctx *context.Context) { return } - notification.NotifyUpdateComment(ctx.User, comment, comment.Issue.RepoID, oldContent) + notification.NotifyUpdateComment(ctx.User, comment, oldContent) ctx.JSON(200, map[string]interface{}{ "content": string(markdown.Render([]byte(comment.Content), ctx.Query("context"), ctx.Repo.Repository.ComposeMetas())), @@ -1292,7 +1292,7 @@ func DeleteComment(ctx *context.Context) { return } - notification.NotifyDeleteComment(ctx.User, comment, comment.Issue.RepoID) + notification.NotifyDeleteComment(ctx.User, comment) ctx.Status(200) } From fb969ad4ffe8f0470f19ce2552b2d133c5998d20 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 17 Feb 2019 20:17:39 +0800 Subject: [PATCH 12/14] use levelqueue instead of ledis --- Gopkg.lock | 91 +- Gopkg.toml | 2 +- models/issue_indexer.go | 5 +- .../{queue_ledis_local.go => queue_disk.go} | 42 +- modules/setting/setting.go | 12 +- vendor/github.com/cupcake/rdb/LICENCE | 21 - vendor/github.com/cupcake/rdb/crc64/crc64.go | 64 - vendor/github.com/cupcake/rdb/decoder.go | 824 ------------- vendor/github.com/cupcake/rdb/encoder.go | 130 -- .../cupcake/rdb/nopdecoder/nop_decoder.go | 24 - vendor/github.com/cupcake/rdb/slice_buffer.go | 67 -- .../ledisdb => lunny/levelqueue}/LICENSE | 12 +- 
vendor/github.com/lunny/levelqueue/error.go | 12 + vendor/github.com/lunny/levelqueue/queue.go | 214 ++++ vendor/github.com/pelletier/go-toml/LICENSE | 21 - vendor/github.com/pelletier/go-toml/doc.go | 23 - vendor/github.com/pelletier/go-toml/fuzz.go | 31 - .../pelletier/go-toml/keysparsing.go | 85 -- vendor/github.com/pelletier/go-toml/lexer.go | 750 ------------ .../github.com/pelletier/go-toml/marshal.go | 609 ---------- vendor/github.com/pelletier/go-toml/parser.go | 430 ------- .../github.com/pelletier/go-toml/position.go | 29 - vendor/github.com/pelletier/go-toml/token.go | 144 --- vendor/github.com/pelletier/go-toml/toml.go | 367 ------ .../pelletier/go-toml/tomltree_create.go | 142 --- .../pelletier/go-toml/tomltree_write.go | 333 ------ vendor/github.com/siddontang/go/LICENSE | 20 - vendor/github.com/siddontang/go/bson/LICENSE | 25 - .../github.com/siddontang/go/filelock/LICENSE | 27 - .../go/filelock/file_lock_generic.go | 17 - .../go/filelock/file_lock_solaris.go | 43 - .../siddontang/go/filelock/file_lock_unix.go | 51 - .../go/filelock/file_lock_windows.go | 36 - vendor/github.com/siddontang/go/hack/hack.go | 27 - .../siddontang/go/ioutil2/ioutil.go | 39 - .../siddontang/go/ioutil2/sectionwriter.go | 69 -- vendor/github.com/siddontang/go/log/doc.go | 21 - .../siddontang/go/log/filehandler.go | 221 ---- .../github.com/siddontang/go/log/handler.go | 48 - vendor/github.com/siddontang/go/log/log.go | 343 ------ .../siddontang/go/log/sockethandler.go | 65 - vendor/github.com/siddontang/go/num/bytes.go | 67 -- vendor/github.com/siddontang/go/num/cmp.go | 161 --- vendor/github.com/siddontang/go/num/str.go | 157 --- .../github.com/siddontang/go/snappy/LICENSE | 27 - .../github.com/siddontang/go/snappy/decode.go | 124 -- .../github.com/siddontang/go/snappy/encode.go | 174 --- .../github.com/siddontang/go/snappy/snappy.go | 38 - .../github.com/siddontang/go/sync2/atomic.go | 146 --- .../siddontang/go/sync2/semaphore.go | 65 - .../siddontang/ledisdb/config/config.go | 315 ----- .../siddontang/ledisdb/ledis/batch.go | 139 --- .../siddontang/ledisdb/ledis/const.go | 144 --- .../siddontang/ledisdb/ledis/doc.go | 58 - .../siddontang/ledisdb/ledis/dump.go | 223 ---- .../siddontang/ledisdb/ledis/event.go | 126 -- .../siddontang/ledisdb/ledis/ledis.go | 241 ---- .../siddontang/ledisdb/ledis/ledis_db.go | 204 ---- .../siddontang/ledisdb/ledis/migrate.go | 189 --- .../siddontang/ledisdb/ledis/replication.go | 250 ---- .../siddontang/ledisdb/ledis/scan.go | 396 ------ .../siddontang/ledisdb/ledis/sort.go | 233 ---- .../siddontang/ledisdb/ledis/t_hash.go | 537 --------- .../siddontang/ledisdb/ledis/t_kv.go | 769 ------------ .../siddontang/ledisdb/ledis/t_list.go | 783 ------------ .../siddontang/ledisdb/ledis/t_set.go | 627 ---------- .../siddontang/ledisdb/ledis/t_ttl.go | 213 ---- .../siddontang/ledisdb/ledis/t_zset.go | 1063 ----------------- .../siddontang/ledisdb/ledis/util.go | 95 -- .../siddontang/ledisdb/rpl/file_io.go | 363 ------ .../siddontang/ledisdb/rpl/file_store.go | 416 ------- .../siddontang/ledisdb/rpl/file_table.go | 571 --------- .../siddontang/ledisdb/rpl/goleveldb_store.go | 225 ---- .../github.com/siddontang/ledisdb/rpl/log.go | 167 --- .../github.com/siddontang/ledisdb/rpl/rpl.go | 336 ------ .../siddontang/ledisdb/rpl/store.go | 36 - .../github.com/siddontang/ledisdb/store/db.go | 169 --- .../siddontang/ledisdb/store/driver/driver.go | 57 - .../siddontang/ledisdb/store/driver/slice.go | 21 - .../siddontang/ledisdb/store/driver/store.go | 46 - 
.../ledisdb/store/goleveldb/batch.go | 39 - .../ledisdb/store/goleveldb/const.go | 4 - .../siddontang/ledisdb/store/goleveldb/db.go | 204 ---- .../ledisdb/store/goleveldb/iterator.go | 49 - .../ledisdb/store/goleveldb/snapshot.go | 26 - .../siddontang/ledisdb/store/iterator.go | 334 ------ .../siddontang/ledisdb/store/leveldb/batch.go | 99 -- .../siddontang/ledisdb/store/leveldb/cache.go | 20 - .../siddontang/ledisdb/store/leveldb/const.go | 3 - .../siddontang/ledisdb/store/leveldb/db.go | 314 ----- .../ledisdb/store/leveldb/filterpolicy.go | 21 - .../ledisdb/store/leveldb/iterator.go | 70 -- .../ledisdb/store/leveldb/leveldb_ext.cc | 95 -- .../ledisdb/store/leveldb/leveldb_ext.h | 41 - .../ledisdb/store/leveldb/options.go | 126 -- .../siddontang/ledisdb/store/leveldb/slice.go | 40 - .../ledisdb/store/leveldb/snapshot.go | 39 - .../siddontang/ledisdb/store/leveldb/util.go | 45 - .../siddontang/ledisdb/store/rocksdb/batch.go | 83 -- .../siddontang/ledisdb/store/rocksdb/cache.go | 20 - .../siddontang/ledisdb/store/rocksdb/const.go | 3 - .../siddontang/ledisdb/store/rocksdb/db.go | 342 ------ .../siddontang/ledisdb/store/rocksdb/env.go | 27 - .../ledisdb/store/rocksdb/filterpolicy.go | 21 - .../ledisdb/store/rocksdb/iterator.go | 70 -- .../ledisdb/store/rocksdb/options.go | 229 ---- .../ledisdb/store/rocksdb/rocksdb_ext.cc | 44 - .../ledisdb/store/rocksdb/rocksdb_ext.h | 24 - .../siddontang/ledisdb/store/rocksdb/slice.go | 41 - .../ledisdb/store/rocksdb/snapshot.go | 39 - .../siddontang/ledisdb/store/rocksdb/util.go | 54 - .../siddontang/ledisdb/store/slice.go | 9 - .../siddontang/ledisdb/store/snapshot.go | 48 - .../siddontang/ledisdb/store/stat.go | 37 - .../siddontang/ledisdb/store/store.go | 62 - .../siddontang/ledisdb/store/writebatch.go | 136 --- vendor/github.com/siddontang/rdb/LICENSE | 21 - vendor/github.com/siddontang/rdb/decode.go | 128 -- vendor/github.com/siddontang/rdb/digest.go | 106 -- vendor/github.com/siddontang/rdb/encode.go | 52 - vendor/github.com/siddontang/rdb/loader.go | 112 -- vendor/github.com/siddontang/rdb/reader.go | 332 ----- 122 files changed, 262 insertions(+), 18854 deletions(-) rename modules/indexer/issues/{queue_ledis_local.go => queue_disk.go} (64%) delete mode 100644 vendor/github.com/cupcake/rdb/LICENCE delete mode 100644 vendor/github.com/cupcake/rdb/crc64/crc64.go delete mode 100644 vendor/github.com/cupcake/rdb/decoder.go delete mode 100644 vendor/github.com/cupcake/rdb/encoder.go delete mode 100644 vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go delete mode 100644 vendor/github.com/cupcake/rdb/slice_buffer.go rename vendor/github.com/{siddontang/ledisdb => lunny/levelqueue}/LICENSE (87%) create mode 100644 vendor/github.com/lunny/levelqueue/error.go create mode 100644 vendor/github.com/lunny/levelqueue/queue.go delete mode 100644 vendor/github.com/pelletier/go-toml/LICENSE delete mode 100644 vendor/github.com/pelletier/go-toml/doc.go delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go delete mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go delete mode 100644 vendor/github.com/pelletier/go-toml/lexer.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal.go delete mode 100644 vendor/github.com/pelletier/go-toml/parser.go delete mode 100644 vendor/github.com/pelletier/go-toml/position.go delete mode 100644 vendor/github.com/pelletier/go-toml/token.go delete mode 100644 vendor/github.com/pelletier/go-toml/toml.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go delete mode 100644 
vendor/github.com/pelletier/go-toml/tomltree_write.go delete mode 100644 vendor/github.com/siddontang/go/LICENSE delete mode 100644 vendor/github.com/siddontang/go/bson/LICENSE delete mode 100644 vendor/github.com/siddontang/go/filelock/LICENSE delete mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_generic.go delete mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_solaris.go delete mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_unix.go delete mode 100644 vendor/github.com/siddontang/go/filelock/file_lock_windows.go delete mode 100644 vendor/github.com/siddontang/go/hack/hack.go delete mode 100644 vendor/github.com/siddontang/go/ioutil2/ioutil.go delete mode 100644 vendor/github.com/siddontang/go/ioutil2/sectionwriter.go delete mode 100644 vendor/github.com/siddontang/go/log/doc.go delete mode 100644 vendor/github.com/siddontang/go/log/filehandler.go delete mode 100644 vendor/github.com/siddontang/go/log/handler.go delete mode 100644 vendor/github.com/siddontang/go/log/log.go delete mode 100644 vendor/github.com/siddontang/go/log/sockethandler.go delete mode 100644 vendor/github.com/siddontang/go/num/bytes.go delete mode 100644 vendor/github.com/siddontang/go/num/cmp.go delete mode 100644 vendor/github.com/siddontang/go/num/str.go delete mode 100644 vendor/github.com/siddontang/go/snappy/LICENSE delete mode 100644 vendor/github.com/siddontang/go/snappy/decode.go delete mode 100644 vendor/github.com/siddontang/go/snappy/encode.go delete mode 100644 vendor/github.com/siddontang/go/snappy/snappy.go delete mode 100644 vendor/github.com/siddontang/go/sync2/atomic.go delete mode 100644 vendor/github.com/siddontang/go/sync2/semaphore.go delete mode 100644 vendor/github.com/siddontang/ledisdb/config/config.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/batch.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/const.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/doc.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/dump.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/event.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/migrate.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/replication.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/scan.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/sort.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_hash.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_kv.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_list.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_set.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/t_zset.go delete mode 100644 vendor/github.com/siddontang/ledisdb/ledis/util.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_io.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_store.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/file_table.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/log.go delete mode 100644 vendor/github.com/siddontang/ledisdb/rpl/rpl.go delete mode 100644 
vendor/github.com/siddontang/ledisdb/rpl/store.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/db.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/driver.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/slice.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/driver/store.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/iterator.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/const.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/db.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/options.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/leveldb/util.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/slice.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/snapshot.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/stat.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/store.go delete mode 100644 vendor/github.com/siddontang/ledisdb/store/writebatch.go delete mode 100644 vendor/github.com/siddontang/rdb/LICENSE delete mode 100644 vendor/github.com/siddontang/rdb/decode.go delete mode 100644 vendor/github.com/siddontang/rdb/digest.go delete mode 100644 vendor/github.com/siddontang/rdb/encode.go delete mode 100644 vendor/github.com/siddontang/rdb/loader.go delete mode 100644 
vendor/github.com/siddontang/rdb/reader.go diff --git a/Gopkg.lock b/Gopkg.lock index 592d48bf29e7..a2193eab1a1f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -221,17 +221,6 @@ pruneopts = "NUT" revision = "d904413d884d1fb849e2ad8834619f661761ef57" -[[projects]] - digest = "1:2e39e716a20e285bb1da0c5f6d00d7b5da0e50e527a8e5dd0258d1e0fcd1b403" - name = "github.com/cupcake/rdb" - packages = [ - ".", - "crc64", - "nopdecoder", - ] - pruneopts = "NUT" - revision = "43ba34106c765f2111c0dc7b74cdf8ee437411e0" - [[projects]] digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" @@ -627,6 +616,14 @@ pruneopts = "NUT" revision = "e3534c89ef969912856dfa39e56b09e58c5f5daf" +[[projects]] + branch = "master" + digest = "1:3ea59a5ada4bbac04da58e6177ca63da8c377a3143b48fca584408bf415fdafb" + name = "github.com/lunny/levelqueue" + packages = ["."] + pruneopts = "NUT" + revision = "02b525a4418e684a7786215296984e364746806f" + [[projects]] digest = "1:1e6a29ed1f189354030e3371f63ec58aacbc2bf232fd104c6e0d41174ac5af48" name = "github.com/lunny/log" @@ -744,14 +741,6 @@ revision = "c37440a7cf42ac63b919c752ca73a85067e05992" version = "v0.2.0" -[[projects]] - digest = "1:51ea800cff51752ff68e12e04106f5887b4daec6f9356721238c28019f0b42db" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "NUT" - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - [[projects]] digest = "1:44c66ad69563dbe3f8e76d7d6cad21a03626e53f1875b5ab163ded419e01ca7a" name = "github.com/philhofer/fwd" @@ -861,22 +850,6 @@ pruneopts = "NUT" revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2" -[[projects]] - branch = "master" - digest = "1:81cd039986aace9719c68a9794fa8c9dd1007cffa1ff8995631e8ed35aacf6fe" - name = "github.com/siddontang/go" - packages = [ - "filelock", - "hack", - "ioutil2", - "log", - "num", - "snappy", - "sync2", - ] - pruneopts = "NUT" - revision = "bdc77568d726a8702315ec4eafda030b6abc4f43" - [[projects]] branch = "master" digest = "1:dbda803f21e60c38de7d9f884390f2ebbe234ce0c3d139b65bbb36b03a99d266" @@ -885,31 +858,6 @@ pruneopts = "NUT" revision = "d8f7bb82a96d89c1254e5a6c967134e1433c9ee2" -[[projects]] - digest = "1:25ac32ee449099128d3c84e4d4596749f1ba8965045bdfe4e99e1914e26b5e93" - name = "github.com/siddontang/ledisdb" - packages = [ - "config", - "ledis", - "rpl", - "store", - "store/driver", - "store/goleveldb", - "store/leveldb", - "store/rocksdb", - ] - pruneopts = "NUT" - revision = "56900470a899883f691bcdf6bea4ac547f2a9a6f" - version = "v0.6" - -[[projects]] - branch = "master" - digest = "1:7ddaee1a4c41ddf0b35191621f7849bb96889a614137356a851c5d4da491f173" - name = "github.com/siddontang/rdb" - packages = ["."] - pruneopts = "NUT" - revision = "fc89ed2e418d27e3ea76e708e54276d2b44ae9cf" - [[projects]] digest = "1:89fd77d603a74a6540d60067debad9397865bf040955d907362c95d364baeba6" name = "github.com/src-d/gcfg" @@ -939,26 +887,6 @@ revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" version = "v1.2.1" -[[projects]] - digest = "1:685fdfea42d825ebd39ee0994354b46c374cf2c2b2d97a41a8dee1807c6a9b62" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "NUT" - revision = "cfa635847112c5dc4782e128fa7e0d05fdbfb394" - [[projects]] branch = "master" digest = 
"1:685fdfea42d825ebd39ee0994354b46c374cf2c2b2d97a41a8dee1807c6a9b62" @@ -1350,6 +1278,7 @@ "github.com/lafriks/xormstore", "github.com/lib/pq", "github.com/lunny/dingtalk_webhook", + "github.com/lunny/levelqueue", "github.com/markbates/goth", "github.com/markbates/goth/gothic", "github.com/markbates/goth/providers/bitbucket", @@ -1373,8 +1302,6 @@ "github.com/russross/blackfriday", "github.com/satori/go.uuid", "github.com/sergi/go-diff/diffmatchpatch", - "github.com/siddontang/ledisdb/config", - "github.com/siddontang/ledisdb/ledis", "github.com/stretchr/testify/assert", "github.com/tstranex/u2f", "github.com/urfave/cli", diff --git a/Gopkg.toml b/Gopkg.toml index 94f15079ba93..ba29ce5a962c 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -112,4 +112,4 @@ ignored = ["google.golang.org/appengine*"] [[constraint]] name = "github.com/prometheus/client_golang" - version = "0.9.0" + version = "0.9.0" \ No newline at end of file diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 227dd6ce3e85..00b8558eaf09 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -37,11 +37,10 @@ func InitIssueIndexer() error { var err error switch setting.Indexer.IssueIndexerQueueType { - case setting.LedisLocalQueueType: - issueIndexerUpdateQueue, err = issues.NewLedisLocalQueue( + case setting.LevelQueueType: + issueIndexerUpdateQueue, err = issues.NewLevelQueue( issueIndexer, setting.Indexer.IssueIndexerQueueDir, - setting.Indexer.IssueIndexerQueueDBIndex, setting.Indexer.IssueIndexerQueueBatchNumber) if err != nil { return err diff --git a/modules/indexer/issues/queue_ledis_local.go b/modules/indexer/issues/queue_disk.go similarity index 64% rename from modules/indexer/issues/queue_ledis_local.go rename to modules/indexer/issues/queue_disk.go index 854e2df60ce0..97e9a3d96508 100644 --- a/modules/indexer/issues/queue_ledis_local.go +++ b/modules/indexer/issues/queue_disk.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. +// Copyright 2019 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
@@ -9,52 +9,40 @@
 import (
 	"time"

 	"code.gitea.io/gitea/modules/log"
-	"github.com/siddontang/ledisdb/config"
-	"github.com/siddontang/ledisdb/ledis"
+	"github.com/lunny/levelqueue"
 )

 var (
-	_ Queue = &LedisLocalQueue{}
-
-	ledisLocalKey = []byte("ledis_local_key")
+	_ Queue = &LevelQueue{}
 )

-// LedisLocalQueue implements a ledis as a disk library queue
-type LedisLocalQueue struct {
+// LevelQueue implements an on-disk (LevelDB-backed) queue
+type LevelQueue struct {
 	indexer     Indexer
-	ledis       *ledis.Ledis
-	db          *ledis.DB
+	queue       *levelqueue.Queue
 	batchNumber int
 }

-// NewLedisLocalQueue creates a ledis local queue
-func NewLedisLocalQueue(indexer Indexer, dataDir string, dbIdx, batchNumber int) (*LedisLocalQueue, error) {
-	ledis, err := ledis.Open(&config.Config{
-		DataDir: dataDir,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	db, err := ledis.Select(dbIdx)
+// NewLevelQueue creates a LevelDB-backed local queue
+func NewLevelQueue(indexer Indexer, dataDir string, batchNumber int) (*LevelQueue, error) {
+	queue, err := levelqueue.Open(dataDir)
 	if err != nil {
 		return nil, err
 	}

-	return &LedisLocalQueue{
+	return &LevelQueue{
 		indexer:     indexer,
-		ledis:       ledis,
-		db:          db,
+		queue:       queue,
 		batchNumber: batchNumber,
 	}, nil
 }

 // Run starts to run the queue
-func (l *LedisLocalQueue) Run() error {
+func (l *LevelQueue) Run() error {
 	var i int
 	var datas = make([]*IndexerData, 0, l.batchNumber)
 	for {
-		bs, err := l.db.RPop(ledisLocalKey)
+		bs, err := l.queue.RPop()
 		if err != nil {
 			log.Error(4, "RPop: %v", err)
 			time.Sleep(time.Millisecond * 100)
@@ -103,13 +91,13 @@ func (l *LedisLocalQueue) Run() error {
 }

 // Push will push the indexer data to queue
-func (l *LedisLocalQueue) Push(data *IndexerData) {
+func (l *LevelQueue) Push(data *IndexerData) {
 	bs, err := json.Marshal(data)
 	if err != nil {
 		log.Error(4, "Marshal: %v", err)
 		return
 	}
-	_, err = l.db.LPush(ledisLocalKey, bs)
+	err = l.queue.LPush(bs)
 	if err != nil {
 		log.Error(4, "LPush: %v", err)
 	}
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index c9b599eb0baa..3b19ee996408 100644
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -85,8 +85,8 @@ const (

 // enumerates all the indexer queue types
 const (
-	LedisLocalQueueType = "ledis_local"
-	ChannelQueueType    = "channel"
+	LevelQueueType   = "levelqueue"
+	ChannelQueueType = "channel"
 )

 // settings
@@ -195,15 +195,13 @@ var (
 		MaxIndexerFileSize           int64
 		IssueIndexerQueueType        string
 		IssueIndexerQueueDir         string
-		IssueIndexerQueueDBIndex     int
 		IssueIndexerQueueBatchNumber int
 	}{
 		IssueType:                    "bleve",
 		IssuePath:                    "indexers/issues.bleve",
-		IssueIndexerQueueType:        LedisLocalQueueType,
+		IssueIndexerQueueType:        LevelQueueType,
 		IssueIndexerQueueDir:         "indexers/issues.queue",
 		IssueIndexerQueueBatchNumber: 20,
-		IssueIndexerQueueDBIndex:     0,
 	}

 	// Repository settings
@@ -1247,11 +1245,9 @@ func NewContext() {
 	}
 	Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20)
 	Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024)
-	Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LedisLocalQueueType)
+	Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType)
 	Indexer.IssueIndexerQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue"))
 	Indexer.IssueIndexerQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20)
-	Indexer.IssueIndexerQueueDBIndex = sec.Key("ISSUE_INDEXER_QUEUE_DB_INDEX").MustInt(0)
-
 }

 // NewServices initializes the services
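This patch also trims the configuration surface: the ledis DB-index knob disappears and the default queue type becomes "levelqueue". For illustration only (not part of the patch, and assuming these keys live under the [indexer] section the code above reads from), the equivalent app.ini fragment would be:

[indexer]
ISSUE_INDEXER_QUEUE_TYPE = levelqueue
ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue
ISSUE_INDEXER_QUEUE_BATCH_NUMBER = 20

A rough sketch of the levelqueue calls the new LevelQueue wraps, using only the API shown in this patch (the payload is a placeholder; in the real code it is JSON-marshalled IndexerData):

package main

import "github.com/lunny/levelqueue"

func main() {
	// Open creates the on-disk queue under the data dir if it does not exist yet.
	q, err := levelqueue.Open("indexers/issues.queue")
	if err != nil {
		panic(err)
	}
	// LevelQueue.Push marshals IndexerData and LPushes the bytes;
	// the Run loop RPops them back off and feeds the indexer in batches.
	if err = q.LPush([]byte("some payload")); err != nil {
		panic(err)
	}
	if _, err = q.RPop(); err != nil && err != levelqueue.ErrNotFound {
		panic(err)
	}
}

diff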
--git a/vendor/github.com/cupcake/rdb/LICENCE b/vendor/github.com/cupcake/rdb/LICENCE deleted file mode 100644 index 50257901b124..000000000000 --- a/vendor/github.com/cupcake/rdb/LICENCE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2012 Jonathan Rudenberg -Copyright (c) 2012 Sripathi Krishnan - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cupcake/rdb/crc64/crc64.go b/vendor/github.com/cupcake/rdb/crc64/crc64.go deleted file mode 100644 index 54fed9c5a291..000000000000 --- a/vendor/github.com/cupcake/rdb/crc64/crc64.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc64 implements the Jones coefficients with an init value of 0. -package crc64 - -import "hash" - -// Redis uses the CRC64 variant with "Jones" coefficients and init value of 0. 
-// -// Specification of this CRC64 variant follows: -// Name: crc-64-jones -// Width: 64 bits -// Poly: 0xad93d23594c935a9 -// Reflected In: True -// Xor_In: 0xffffffffffffffff -// Reflected_Out: True -// Xor_Out: 0x0 - -var table = [256]uint64{0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, 0xaf85882d2576a038, 
0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, 0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728} - -func crc64(crc uint64, b []byte) uint64 { - for _, v := range b { - crc = table[byte(crc)^v] ^ (crc >> 8) - } - return crc -} - -func Digest(b []byte) uint64 { - return crc64(0, b) -} - -type digest struct { - crc uint64 -} - -func New() hash.Hash64 { - return &digest{} -} - -func (h *digest) Write(p []byte) (int, error) { - h.crc = crc64(h.crc, p) - return len(p), nil -} - -// Encode in little endian -func (d *digest) Sum(in []byte) []byte { - s := d.Sum64() - in = append(in, byte(s)) - in = append(in, byte(s>>8)) - in = append(in, byte(s>>16)) - in = append(in, byte(s>>24)) - in = append(in, byte(s>>32)) - in = append(in, byte(s>>40)) - in = append(in, byte(s>>48)) - in = append(in, byte(s>>56)) - return in -} - -func (d *digest) Sum64() uint64 { return d.crc } -func (d *digest) BlockSize() int { return 1 } -func (d *digest) Size() int { return 8 } -func (d *digest) Reset() { d.crc = 0 } diff --git a/vendor/github.com/cupcake/rdb/decoder.go b/vendor/github.com/cupcake/rdb/decoder.go deleted file mode 100644 index dd3993b5cebc..000000000000 --- a/vendor/github.com/cupcake/rdb/decoder.go +++ /dev/null @@ -1,824 +0,0 @@ -// Package rdb implements parsing and encoding of the Redis RDB file format. -package rdb - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" - "math" - "strconv" - - "github.com/cupcake/rdb/crc64" -) - -// A Decoder must be implemented to parse a RDB file. -type Decoder interface { - // StartRDB is called when parsing of a valid RDB file starts. - StartRDB() - // StartDatabase is called when database n starts. - // Once a database starts, another database will not start until EndDatabase is called. 
- StartDatabase(n int) - // AUX field - Aux(key, value []byte) - // ResizeDB hint - ResizeDatabase(dbSize, expiresSize uint32) - // Set is called once for each string key. - Set(key, value []byte, expiry int64) - // StartHash is called at the beginning of a hash. - // Hset will be called exactly length times before EndHash. - StartHash(key []byte, length, expiry int64) - // Hset is called once for each field=value pair in a hash. - Hset(key, field, value []byte) - // EndHash is called when there are no more fields in a hash. - EndHash(key []byte) - // StartSet is called at the beginning of a set. - // Sadd will be called exactly cardinality times before EndSet. - StartSet(key []byte, cardinality, expiry int64) - // Sadd is called once for each member of a set. - Sadd(key, member []byte) - // EndSet is called when there are no more fields in a set. - EndSet(key []byte) - // StartList is called at the beginning of a list. - // Rpush will be called exactly length times before EndList. - // If length of the list is not known, then length is -1 - StartList(key []byte, length, expiry int64) - // Rpush is called once for each value in a list. - Rpush(key, value []byte) - // EndList is called when there are no more values in a list. - EndList(key []byte) - // StartZSet is called at the beginning of a sorted set. - // Zadd will be called exactly cardinality times before EndZSet. - StartZSet(key []byte, cardinality, expiry int64) - // Zadd is called once for each member of a sorted set. - Zadd(key []byte, score float64, member []byte) - // EndZSet is called when there are no more members in a sorted set. - EndZSet(key []byte) - // EndDatabase is called at the end of a database. - EndDatabase(n int) - // EndRDB is called when parsing of the RDB file is complete. - EndRDB() -} - -// Decode parses a RDB file from r and calls the decode hooks on d. -func Decode(r io.Reader, d Decoder) error { - decoder := &decode{d, make([]byte, 8), bufio.NewReader(r)} - return decoder.decode() -} - -// Decode a byte slice from the Redis DUMP command. The dump does not contain the -// database, key or expiry, so they must be included in the function call (but -// can be zero values). 
-func DecodeDump(dump []byte, db int, key []byte, expiry int64, d Decoder) error { - err := verifyDump(dump) - if err != nil { - return err - } - - decoder := &decode{d, make([]byte, 8), bytes.NewReader(dump[1:])} - decoder.event.StartRDB() - decoder.event.StartDatabase(db) - - err = decoder.readObject(key, ValueType(dump[0]), expiry) - - decoder.event.EndDatabase(db) - decoder.event.EndRDB() - return err -} - -type byteReader interface { - io.Reader - io.ByteReader -} - -type decode struct { - event Decoder - intBuf []byte - r byteReader -} - -type ValueType byte - -const ( - TypeString ValueType = 0 - TypeList ValueType = 1 - TypeSet ValueType = 2 - TypeZSet ValueType = 3 - TypeHash ValueType = 4 - - TypeHashZipmap ValueType = 9 - TypeListZiplist ValueType = 10 - TypeSetIntset ValueType = 11 - TypeZSetZiplist ValueType = 12 - TypeHashZiplist ValueType = 13 - TypeListQuicklist ValueType = 14 -) - -const ( - rdb6bitLen = 0 - rdb14bitLen = 1 - rdb32bitLen = 2 - rdbEncVal = 3 - - rdbFlagAux = 0xfa - rdbFlagResizeDB = 0xfb - rdbFlagExpiryMS = 0xfc - rdbFlagExpiry = 0xfd - rdbFlagSelectDB = 0xfe - rdbFlagEOF = 0xff - - rdbEncInt8 = 0 - rdbEncInt16 = 1 - rdbEncInt32 = 2 - rdbEncLZF = 3 - - rdbZiplist6bitlenString = 0 - rdbZiplist14bitlenString = 1 - rdbZiplist32bitlenString = 2 - - rdbZiplistInt16 = 0xc0 - rdbZiplistInt32 = 0xd0 - rdbZiplistInt64 = 0xe0 - rdbZiplistInt24 = 0xf0 - rdbZiplistInt8 = 0xfe - rdbZiplistInt4 = 15 -) - -func (d *decode) decode() error { - err := d.checkHeader() - if err != nil { - return err - } - - d.event.StartRDB() - - var db uint32 - var expiry int64 - firstDB := true - for { - objType, err := d.r.ReadByte() - if err != nil { - return err - } - switch objType { - case rdbFlagAux: - auxKey, err := d.readString() - if err != nil { - return err - } - auxVal, err := d.readString() - if err != nil { - return err - } - d.event.Aux(auxKey, auxVal) - case rdbFlagResizeDB: - dbSize, _, err := d.readLength() - if err != nil { - return err - } - expiresSize, _, err := d.readLength() - if err != nil { - return err - } - d.event.ResizeDatabase(dbSize, expiresSize) - case rdbFlagExpiryMS: - _, err := io.ReadFull(d.r, d.intBuf) - if err != nil { - return err - } - expiry = int64(binary.LittleEndian.Uint64(d.intBuf)) - case rdbFlagExpiry: - _, err := io.ReadFull(d.r, d.intBuf[:4]) - if err != nil { - return err - } - expiry = int64(binary.LittleEndian.Uint32(d.intBuf)) * 1000 - case rdbFlagSelectDB: - if !firstDB { - d.event.EndDatabase(int(db)) - } - db, _, err = d.readLength() - if err != nil { - return err - } - d.event.StartDatabase(int(db)) - case rdbFlagEOF: - d.event.EndDatabase(int(db)) - d.event.EndRDB() - return nil - default: - key, err := d.readString() - if err != nil { - return err - } - err = d.readObject(key, ValueType(objType), expiry) - if err != nil { - return err - } - expiry = 0 - } - } - - panic("not reached") -} - -func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error { - switch typ { - case TypeString: - value, err := d.readString() - if err != nil { - return err - } - d.event.Set(key, value, expiry) - case TypeList: - length, _, err := d.readLength() - if err != nil { - return err - } - d.event.StartList(key, int64(length), expiry) - for i := uint32(0); i < length; i++ { - value, err := d.readString() - if err != nil { - return err - } - d.event.Rpush(key, value) - } - d.event.EndList(key) - case TypeListQuicklist: - length, _, err := d.readLength() - if err != nil { - return err - } - d.event.StartList(key, int64(-1), expiry) - for i 
:= uint32(0); i < length; i++ { - d.readZiplist(key, 0, false) - } - d.event.EndList(key) - case TypeSet: - cardinality, _, err := d.readLength() - if err != nil { - return err - } - d.event.StartSet(key, int64(cardinality), expiry) - for i := uint32(0); i < cardinality; i++ { - member, err := d.readString() - if err != nil { - return err - } - d.event.Sadd(key, member) - } - d.event.EndSet(key) - case TypeZSet: - cardinality, _, err := d.readLength() - if err != nil { - return err - } - d.event.StartZSet(key, int64(cardinality), expiry) - for i := uint32(0); i < cardinality; i++ { - member, err := d.readString() - if err != nil { - return err - } - score, err := d.readFloat64() - if err != nil { - return err - } - d.event.Zadd(key, score, member) - } - d.event.EndZSet(key) - case TypeHash: - length, _, err := d.readLength() - if err != nil { - return err - } - d.event.StartHash(key, int64(length), expiry) - for i := uint32(0); i < length; i++ { - field, err := d.readString() - if err != nil { - return err - } - value, err := d.readString() - if err != nil { - return err - } - d.event.Hset(key, field, value) - } - d.event.EndHash(key) - case TypeHashZipmap: - return d.readZipmap(key, expiry) - case TypeListZiplist: - return d.readZiplist(key, expiry, true) - case TypeSetIntset: - return d.readIntset(key, expiry) - case TypeZSetZiplist: - return d.readZiplistZset(key, expiry) - case TypeHashZiplist: - return d.readZiplistHash(key, expiry) - default: - return fmt.Errorf("rdb: unknown object type %d for key %s", typ, key) - } - return nil -} - -func (d *decode) readZipmap(key []byte, expiry int64) error { - var length int - zipmap, err := d.readString() - if err != nil { - return err - } - buf := newSliceBuffer(zipmap) - lenByte, err := buf.ReadByte() - if err != nil { - return err - } - if lenByte >= 254 { // we need to count the items manually - length, err = countZipmapItems(buf) - length /= 2 - if err != nil { - return err - } - } else { - length = int(lenByte) - } - d.event.StartHash(key, int64(length), expiry) - for i := 0; i < length; i++ { - field, err := readZipmapItem(buf, false) - if err != nil { - return err - } - value, err := readZipmapItem(buf, true) - if err != nil { - return err - } - d.event.Hset(key, field, value) - } - d.event.EndHash(key) - return nil -} - -func readZipmapItem(buf *sliceBuffer, readFree bool) ([]byte, error) { - length, free, err := readZipmapItemLength(buf, readFree) - if err != nil { - return nil, err - } - if length == -1 { - return nil, nil - } - value, err := buf.Slice(length) - if err != nil { - return nil, err - } - _, err = buf.Seek(int64(free), 1) - return value, err -} - -func countZipmapItems(buf *sliceBuffer) (int, error) { - n := 0 - for { - strLen, free, err := readZipmapItemLength(buf, n%2 != 0) - if err != nil { - return 0, err - } - if strLen == -1 { - break - } - _, err = buf.Seek(int64(strLen)+int64(free), 1) - if err != nil { - return 0, err - } - n++ - } - _, err := buf.Seek(0, 0) - return n, err -} - -func readZipmapItemLength(buf *sliceBuffer, readFree bool) (int, int, error) { - b, err := buf.ReadByte() - if err != nil { - return 0, 0, err - } - switch b { - case 253: - s, err := buf.Slice(5) - if err != nil { - return 0, 0, err - } - return int(binary.BigEndian.Uint32(s)), int(s[4]), nil - case 254: - return 0, 0, fmt.Errorf("rdb: invalid zipmap item length") - case 255: - return -1, 0, nil - } - var free byte - if readFree { - free, err = buf.ReadByte() - } - return int(b), int(free), err -} - -func (d *decode) readZiplist(key 
[]byte, expiry int64, addListEvents bool) error { - ziplist, err := d.readString() - if err != nil { - return err - } - buf := newSliceBuffer(ziplist) - length, err := readZiplistLength(buf) - if err != nil { - return err - } - if addListEvents { - d.event.StartList(key, length, expiry) - } - for i := int64(0); i < length; i++ { - entry, err := readZiplistEntry(buf) - if err != nil { - return err - } - d.event.Rpush(key, entry) - } - if addListEvents { - d.event.EndList(key) - } - return nil -} - -func (d *decode) readZiplistZset(key []byte, expiry int64) error { - ziplist, err := d.readString() - if err != nil { - return err - } - buf := newSliceBuffer(ziplist) - cardinality, err := readZiplistLength(buf) - if err != nil { - return err - } - cardinality /= 2 - d.event.StartZSet(key, cardinality, expiry) - for i := int64(0); i < cardinality; i++ { - member, err := readZiplistEntry(buf) - if err != nil { - return err - } - scoreBytes, err := readZiplistEntry(buf) - if err != nil { - return err - } - score, err := strconv.ParseFloat(string(scoreBytes), 64) - if err != nil { - return err - } - d.event.Zadd(key, score, member) - } - d.event.EndZSet(key) - return nil -} - -func (d *decode) readZiplistHash(key []byte, expiry int64) error { - ziplist, err := d.readString() - if err != nil { - return err - } - buf := newSliceBuffer(ziplist) - length, err := readZiplistLength(buf) - if err != nil { - return err - } - length /= 2 - d.event.StartHash(key, length, expiry) - for i := int64(0); i < length; i++ { - field, err := readZiplistEntry(buf) - if err != nil { - return err - } - value, err := readZiplistEntry(buf) - if err != nil { - return err - } - d.event.Hset(key, field, value) - } - d.event.EndHash(key) - return nil -} - -func readZiplistLength(buf *sliceBuffer) (int64, error) { - buf.Seek(8, 0) // skip the zlbytes and zltail - lenBytes, err := buf.Slice(2) - if err != nil { - return 0, err - } - return int64(binary.LittleEndian.Uint16(lenBytes)), nil -} - -func readZiplistEntry(buf *sliceBuffer) ([]byte, error) { - prevLen, err := buf.ReadByte() - if err != nil { - return nil, err - } - if prevLen == 254 { - buf.Seek(4, 1) // skip the 4-byte prevlen - } - - header, err := buf.ReadByte() - if err != nil { - return nil, err - } - switch { - case header>>6 == rdbZiplist6bitlenString: - return buf.Slice(int(header & 0x3f)) - case header>>6 == rdbZiplist14bitlenString: - b, err := buf.ReadByte() - if err != nil { - return nil, err - } - return buf.Slice((int(header&0x3f) << 8) | int(b)) - case header>>6 == rdbZiplist32bitlenString: - lenBytes, err := buf.Slice(4) - if err != nil { - return nil, err - } - return buf.Slice(int(binary.BigEndian.Uint32(lenBytes))) - case header == rdbZiplistInt16: - intBytes, err := buf.Slice(2) - if err != nil { - return nil, err - } - return []byte(strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)), nil - case header == rdbZiplistInt32: - intBytes, err := buf.Slice(4) - if err != nil { - return nil, err - } - return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)), nil - case header == rdbZiplistInt64: - intBytes, err := buf.Slice(8) - if err != nil { - return nil, err - } - return []byte(strconv.FormatInt(int64(binary.LittleEndian.Uint64(intBytes)), 10)), nil - case header == rdbZiplistInt24: - intBytes := make([]byte, 4) - _, err := buf.Read(intBytes[1:]) - if err != nil { - return nil, err - } - return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))>>8), 10)), nil - case header 
== rdbZiplistInt8: - b, err := buf.ReadByte() - return []byte(strconv.FormatInt(int64(int8(b)), 10)), err - case header>>4 == rdbZiplistInt4: - return []byte(strconv.FormatInt(int64(header&0x0f)-1, 10)), nil - } - - return nil, fmt.Errorf("rdb: unknown ziplist header byte: %d", header) -} - -func (d *decode) readIntset(key []byte, expiry int64) error { - intset, err := d.readString() - if err != nil { - return err - } - buf := newSliceBuffer(intset) - intSizeBytes, err := buf.Slice(4) - if err != nil { - return err - } - intSize := binary.LittleEndian.Uint32(intSizeBytes) - - if intSize != 2 && intSize != 4 && intSize != 8 { - return fmt.Errorf("rdb: unknown intset encoding: %d", intSize) - } - - lenBytes, err := buf.Slice(4) - if err != nil { - return err - } - cardinality := binary.LittleEndian.Uint32(lenBytes) - - d.event.StartSet(key, int64(cardinality), expiry) - for i := uint32(0); i < cardinality; i++ { - intBytes, err := buf.Slice(int(intSize)) - if err != nil { - return err - } - var intString string - switch intSize { - case 2: - intString = strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10) - case 4: - intString = strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10) - case 8: - intString = strconv.FormatInt(int64(int64(binary.LittleEndian.Uint64(intBytes))), 10) - } - d.event.Sadd(key, []byte(intString)) - } - d.event.EndSet(key) - return nil -} - -func (d *decode) checkHeader() error { - header := make([]byte, 9) - _, err := io.ReadFull(d.r, header) - if err != nil { - return err - } - - if !bytes.Equal(header[:5], []byte("REDIS")) { - return fmt.Errorf("rdb: invalid file format") - } - - version, _ := strconv.ParseInt(string(header[5:]), 10, 64) - if version < 1 || version > 7 { - return fmt.Errorf("rdb: invalid RDB version number %d", version) - } - - return nil -} - -func (d *decode) readString() ([]byte, error) { - length, encoded, err := d.readLength() - if err != nil { - return nil, err - } - if encoded { - switch length { - case rdbEncInt8: - i, err := d.readUint8() - return []byte(strconv.FormatInt(int64(int8(i)), 10)), err - case rdbEncInt16: - i, err := d.readUint16() - return []byte(strconv.FormatInt(int64(int16(i)), 10)), err - case rdbEncInt32: - i, err := d.readUint32() - return []byte(strconv.FormatInt(int64(int32(i)), 10)), err - case rdbEncLZF: - clen, _, err := d.readLength() - if err != nil { - return nil, err - } - ulen, _, err := d.readLength() - if err != nil { - return nil, err - } - compressed := make([]byte, clen) - _, err = io.ReadFull(d.r, compressed) - if err != nil { - return nil, err - } - decompressed := lzfDecompress(compressed, int(ulen)) - if len(decompressed) != int(ulen) { - return nil, fmt.Errorf("decompressed string length %d didn't match expected length %d", len(decompressed), ulen) - } - return decompressed, nil - } - } - - str := make([]byte, length) - _, err = io.ReadFull(d.r, str) - return str, err -} - -func (d *decode) readUint8() (uint8, error) { - b, err := d.r.ReadByte() - return uint8(b), err -} - -func (d *decode) readUint16() (uint16, error) { - _, err := io.ReadFull(d.r, d.intBuf[:2]) - if err != nil { - return 0, err - } - return binary.LittleEndian.Uint16(d.intBuf), nil -} - -func (d *decode) readUint32() (uint32, error) { - _, err := io.ReadFull(d.r, d.intBuf[:4]) - if err != nil { - return 0, err - } - return binary.LittleEndian.Uint32(d.intBuf), nil -} - -func (d *decode) readUint64() (uint64, error) { - _, err := io.ReadFull(d.r, d.intBuf) - if err != nil { - return 0, err - } 
- return binary.LittleEndian.Uint64(d.intBuf), nil -} - -func (d *decode) readUint32Big() (uint32, error) { - _, err := io.ReadFull(d.r, d.intBuf[:4]) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(d.intBuf), nil -} - -// Doubles are saved as strings prefixed by an unsigned -// 8 bit integer specifying the length of the representation. -// This 8 bit integer has special values in order to specify the following -// conditions: -// 253: not a number -// 254: + inf -// 255: - inf -func (d *decode) readFloat64() (float64, error) { - length, err := d.readUint8() - if err != nil { - return 0, err - } - switch length { - case 253: - return math.NaN(), nil - case 254: - return math.Inf(0), nil - case 255: - return math.Inf(-1), nil - default: - floatBytes := make([]byte, length) - _, err := io.ReadFull(d.r, floatBytes) - if err != nil { - return 0, err - } - f, err := strconv.ParseFloat(string(floatBytes), 64) - return f, err - } - - panic("not reached") -} - -func (d *decode) readLength() (uint32, bool, error) { - b, err := d.r.ReadByte() - if err != nil { - return 0, false, err - } - // The first two bits of the first byte are used to indicate the length encoding type - switch (b & 0xc0) >> 6 { - case rdb6bitLen: - // When the first two bits are 00, the next 6 bits are the length. - return uint32(b & 0x3f), false, nil - case rdb14bitLen: - // When the first two bits are 01, the next 14 bits are the length. - bb, err := d.r.ReadByte() - if err != nil { - return 0, false, err - } - return (uint32(b&0x3f) << 8) | uint32(bb), false, nil - case rdbEncVal: - // When the first two bits are 11, the next object is encoded. - // The next 6 bits indicate the encoding type. - return uint32(b & 0x3f), true, nil - default: - // When the first two bits are 10, the next 6 bits are discarded. - // The next 4 bytes are the length. 
- length, err := d.readUint32Big() - return length, false, err - } - - panic("not reached") -} - -func verifyDump(d []byte) error { - if len(d) < 10 { - return fmt.Errorf("rdb: invalid dump length") - } - version := binary.LittleEndian.Uint16(d[len(d)-10:]) - if version != uint16(Version) { - return fmt.Errorf("rdb: invalid version %d, expecting %d", version, Version) - } - - if binary.LittleEndian.Uint64(d[len(d)-8:]) != crc64.Digest(d[:len(d)-8]) { - return fmt.Errorf("rdb: invalid CRC checksum") - } - - return nil -} - -func lzfDecompress(in []byte, outlen int) []byte { - out := make([]byte, outlen) - for i, o := 0, 0; i < len(in); { - ctrl := int(in[i]) - i++ - if ctrl < 32 { - for x := 0; x <= ctrl; x++ { - out[o] = in[i] - i++ - o++ - } - } else { - length := ctrl >> 5 - if length == 7 { - length = length + int(in[i]) - i++ - } - ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1 - i++ - for x := 0; x <= length+1; x++ { - out[o] = out[ref] - ref++ - o++ - } - } - } - return out -} diff --git a/vendor/github.com/cupcake/rdb/encoder.go b/vendor/github.com/cupcake/rdb/encoder.go deleted file mode 100644 index 7902a7d314d7..000000000000 --- a/vendor/github.com/cupcake/rdb/encoder.go +++ /dev/null @@ -1,130 +0,0 @@ -package rdb - -import ( - "encoding/binary" - "fmt" - "hash" - "io" - "math" - "strconv" - - "github.com/cupcake/rdb/crc64" -) - -const Version = 6 - -type Encoder struct { - w io.Writer - crc hash.Hash -} - -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{crc: crc64.New()} - e.w = io.MultiWriter(w, e.crc) - return e -} - -func (e *Encoder) EncodeHeader() error { - _, err := fmt.Fprintf(e.w, "REDIS%04d", Version) - return err -} - -func (e *Encoder) EncodeFooter() error { - e.w.Write([]byte{rdbFlagEOF}) - _, err := e.w.Write(e.crc.Sum(nil)) - return err -} - -func (e *Encoder) EncodeDumpFooter() error { - binary.Write(e.w, binary.LittleEndian, uint16(Version)) - _, err := e.w.Write(e.crc.Sum(nil)) - return err -} - -func (e *Encoder) EncodeDatabase(n int) error { - e.w.Write([]byte{rdbFlagSelectDB}) - return e.EncodeLength(uint32(n)) -} - -func (e *Encoder) EncodeExpiry(expiry uint64) error { - b := make([]byte, 9) - b[0] = rdbFlagExpiryMS - binary.LittleEndian.PutUint64(b[1:], expiry) - _, err := e.w.Write(b) - return err -} - -func (e *Encoder) EncodeType(v ValueType) error { - _, err := e.w.Write([]byte{byte(v)}) - return err -} - -func (e *Encoder) EncodeString(s []byte) error { - written, err := e.encodeIntString(s) - if written { - return err - } - e.EncodeLength(uint32(len(s))) - _, err = e.w.Write(s) - return err -} - -func (e *Encoder) EncodeLength(l uint32) (err error) { - switch { - case l < 1<<6: - _, err = e.w.Write([]byte{byte(l)}) - case l < 1<<14: - _, err = e.w.Write([]byte{byte(l>>8) | rdb14bitLen<<6, byte(l)}) - default: - b := make([]byte, 5) - b[0] = rdb32bitLen << 6 - binary.BigEndian.PutUint32(b[1:], l) - _, err = e.w.Write(b) - } - return -} - -func (e *Encoder) EncodeFloat(f float64) (err error) { - switch { - case math.IsNaN(f): - _, err = e.w.Write([]byte{253}) - case math.IsInf(f, 1): - _, err = e.w.Write([]byte{254}) - case math.IsInf(f, -1): - _, err = e.w.Write([]byte{255}) - default: - b := []byte(strconv.FormatFloat(f, 'g', 17, 64)) - e.w.Write([]byte{byte(len(b))}) - _, err = e.w.Write(b) - } - return -} - -func (e *Encoder) encodeIntString(b []byte) (written bool, err error) { - s := string(b) - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return - } - // if the stringified parsed int isn't exactly the same, we can't encode 
it as an int - if s != strconv.FormatInt(i, 10) { - return - } - switch { - case i >= math.MinInt8 && i <= math.MaxInt8: - _, err = e.w.Write([]byte{rdbEncVal << 6, byte(int8(i))}) - case i >= math.MinInt16 && i <= math.MaxInt16: - b := make([]byte, 3) - b[0] = rdbEncVal<<6 | rdbEncInt16 - binary.LittleEndian.PutUint16(b[1:], uint16(int16(i))) - _, err = e.w.Write(b) - case i >= math.MinInt32 && i <= math.MaxInt32: - b := make([]byte, 5) - b[0] = rdbEncVal<<6 | rdbEncInt32 - binary.LittleEndian.PutUint32(b[1:], uint32(int32(i))) - _, err = e.w.Write(b) - default: - return - } - return true, err -} diff --git a/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go b/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go deleted file mode 100644 index de93a6973fed..000000000000 --- a/vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go +++ /dev/null @@ -1,24 +0,0 @@ -package nopdecoder - -// NopDecoder may be embedded in a real Decoder to avoid implementing methods. -type NopDecoder struct{} - -func (d NopDecoder) StartRDB() {} -func (d NopDecoder) StartDatabase(n int) {} -func (d NopDecoder) Aux(key, value []byte) {} -func (d NopDecoder) ResizeDatabase(dbSize, expiresSize uint32) {} -func (d NopDecoder) EndDatabase(n int) {} -func (d NopDecoder) EndRDB() {} -func (d NopDecoder) Set(key, value []byte, expiry int64) {} -func (d NopDecoder) StartHash(key []byte, length, expiry int64) {} -func (d NopDecoder) Hset(key, field, value []byte) {} -func (d NopDecoder) EndHash(key []byte) {} -func (d NopDecoder) StartSet(key []byte, cardinality, expiry int64) {} -func (d NopDecoder) Sadd(key, member []byte) {} -func (d NopDecoder) EndSet(key []byte) {} -func (d NopDecoder) StartList(key []byte, length, expiry int64) {} -func (d NopDecoder) Rpush(key, value []byte) {} -func (d NopDecoder) EndList(key []byte) {} -func (d NopDecoder) StartZSet(key []byte, cardinality, expiry int64) {} -func (d NopDecoder) Zadd(key []byte, score float64, member []byte) {} -func (d NopDecoder) EndZSet(key []byte) {} diff --git a/vendor/github.com/cupcake/rdb/slice_buffer.go b/vendor/github.com/cupcake/rdb/slice_buffer.go deleted file mode 100644 index b3e12a02c6c0..000000000000 --- a/vendor/github.com/cupcake/rdb/slice_buffer.go +++ /dev/null @@ -1,67 +0,0 @@ -package rdb - -import ( - "errors" - "io" -) - -type sliceBuffer struct { - s []byte - i int -} - -func newSliceBuffer(s []byte) *sliceBuffer { - return &sliceBuffer{s, 0} -} - -func (s *sliceBuffer) Slice(n int) ([]byte, error) { - if s.i+n > len(s.s) { - return nil, io.EOF - } - b := s.s[s.i : s.i+n] - s.i += n - return b, nil -} - -func (s *sliceBuffer) ReadByte() (byte, error) { - if s.i >= len(s.s) { - return 0, io.EOF - } - b := s.s[s.i] - s.i++ - return b, nil -} - -func (s *sliceBuffer) Read(b []byte) (int, error) { - if len(b) == 0 { - return 0, nil - } - if s.i >= len(s.s) { - return 0, io.EOF - } - n := copy(b, s.s[s.i:]) - s.i += n - return n, nil -} - -func (s *sliceBuffer) Seek(offset int64, whence int) (int64, error) { - var abs int64 - switch whence { - case 0: - abs = offset - case 1: - abs = int64(s.i) + offset - case 2: - abs = int64(len(s.s)) + offset - default: - return 0, errors.New("invalid whence") - } - if abs < 0 { - return 0, errors.New("negative position") - } - if abs >= 1<<31 { - return 0, errors.New("position out of range") - } - s.i = int(abs) - return abs, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/LICENSE b/vendor/github.com/lunny/levelqueue/LICENSE similarity index 87% rename from 
vendor/github.com/siddontang/ledisdb/LICENSE rename to vendor/github.com/lunny/levelqueue/LICENSE
index 7ece9fdf5a64..4a5a4ea0ff71 100644
--- a/vendor/github.com/siddontang/ledisdb/LICENSE
+++ b/vendor/github.com/lunny/levelqueue/LICENSE
@@ -1,6 +1,4 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 siddontang
+Copyright (c) 2019 Lunny Xiao
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
 
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/lunny/levelqueue/error.go b/vendor/github.com/lunny/levelqueue/error.go
new file mode 100644
index 000000000000..d639c5d496c0
--- /dev/null
+++ b/vendor/github.com/lunny/levelqueue/error.go
@@ -0,0 +1,12 @@
+// Copyright 2019 Lunny Xiao. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package levelqueue
+
+import "errors"
+
+var (
+	// ErrNotFound is returned when there is no element in the queue
+	ErrNotFound = errors.New("no key found")
+)
diff --git a/vendor/github.com/lunny/levelqueue/queue.go b/vendor/github.com/lunny/levelqueue/queue.go
new file mode 100644
index 000000000000..0b2bef6c849c
--- /dev/null
+++ b/vendor/github.com/lunny/levelqueue/queue.go
@@ -0,0 +1,214 @@
+// Copyright 2019 Lunny Xiao. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package levelqueue
+
+import (
+	"bytes"
+	"encoding/binary"
+	"sync"
+
+	"github.com/syndtr/goleveldb/leveldb"
+)
+
+// Queue defines a double-ended queue whose entries are persisted in LevelDB
+type Queue struct {
+	db       *leveldb.DB
+	highLock sync.Mutex // guards high, the id of the right end
+	lowLock  sync.Mutex // guards low, the id of the left end
+	low      int64
+	high     int64
+}
+
+// Open opens a queue or creates it if it does not exist
+func Open(dataDir string) (*Queue, error) {
+	db, err := leveldb.OpenFile(dataDir, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	var queue = &Queue{
+		db: db,
+	}
+	queue.low, err = queue.readID(lowKey)
+	if err == leveldb.ErrNotFound {
+		queue.low = 1
+		err = db.Put(lowKey, id2bytes(1), nil)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	queue.high, err = queue.readID(highKey)
+	if err == leveldb.ErrNotFound {
+		err = db.Put(highKey, id2bytes(0), nil)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return queue, nil
+}
+
+func (queue *Queue) readID(key []byte) (int64, error) {
+	bs, err := queue.db.Get(key, nil)
+	if err != nil {
+		return 0, err
+	}
+	return bytes2id(bs)
+}
+
+var (
+	lowKey  = []byte("low")
+	highKey = []byte("high")
+)
+
+func (queue *Queue) highincrement() (int64, error) {
+	id := queue.high + 1
+	queue.high = id
+	err := queue.db.Put(highKey, id2bytes(queue.high), nil)
+	if err != nil {
+		queue.high = queue.high - 1
+		return 0, err
+	}
+	return id, nil
+}
+
+func (queue *Queue) highdecrement() (int64, error) {
+	queue.high = queue.high - 1
+	err := queue.db.Put(highKey, id2bytes(queue.high), nil)
+	if err != nil {
+		queue.high = queue.high + 1
+		return 0, err
+	}
+	return queue.high, nil
+}
+
+func (queue *Queue) lowincrement() (int64, error) {
+	queue.low = queue.low + 1
+	err := queue.db.Put(lowKey, id2bytes(queue.low), nil)
+	if err != nil {
+		queue.low = queue.low - 1
+		return 0, err
+	}
+	return queue.low, nil
+}
+
+func (queue *Queue) lowdecrement() (int64, error) {
+	queue.low = queue.low - 1
+	err := queue.db.Put(lowKey, id2bytes(queue.low), nil)
+	if err != nil {
+		queue.low = queue.low + 1
+		return 0, err
+	}
+	return queue.low, nil
+}
+
+// Len returns the length of the queue
+func (queue *Queue) Len() int64 {
+	queue.lowLock.Lock()
+	queue.highLock.Lock()
+	l := queue.high - queue.low + 1
+	queue.highLock.Unlock()
+	queue.lowLock.Unlock()
+	return l
+}
+
+func id2bytes(id int64) []byte {
+	var buf = make([]byte, 8)
+	binary.PutVarint(buf, id)
+	return buf
+}
+
+func bytes2id(b []byte) (int64, error) {
+	return binary.ReadVarint(bytes.NewReader(b))
+}
+
+// RPush pushes data onto the right end of the queue
+func (queue *Queue) RPush(data []byte) error {
+	queue.highLock.Lock()
+	id, err := queue.highincrement()
+	if err != nil {
+		queue.highLock.Unlock()
+		return err
+	}
+	err = queue.db.Put(id2bytes(id), data, nil)
+	queue.highLock.Unlock()
+	return err
+}
+
+// LPush pushes data onto the left end of the queue
+func (queue *Queue) LPush(data []byte) error {
+	queue.lowLock.Lock()
+	id, err := queue.lowdecrement()
+	if err != nil {
+		queue.lowLock.Unlock()
+		return err
+	}
+	err = queue.db.Put(id2bytes(id), data, nil)
+	queue.lowLock.Unlock()
+	return err
+}
+
+// RPop pops data from the right end of the queue
+func (queue *Queue) RPop() ([]byte, error) {
+	queue.highLock.Lock()
+	currentID := queue.high
+
+	res, err := queue.db.Get(id2bytes(currentID), nil)
+	if err != nil {
+		queue.highLock.Unlock()
+		if err == leveldb.ErrNotFound {
+			return nil, ErrNotFound
+		}
+		return nil, err
+	}
+
+	_, err = queue.highdecrement()
+	if err != nil {
+		queue.highLock.Unlock()
+		return nil, err
+	}
+
+	err = queue.db.Delete(id2bytes(currentID), nil)
+	queue.highLock.Unlock()
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// LPop pops data from the left end of the queue
+func (queue *Queue) LPop() ([]byte, error) {
+	queue.lowLock.Lock()
+	currentID := queue.low
+
+	res, err := queue.db.Get(id2bytes(currentID), nil)
+	if err != nil {
+		queue.lowLock.Unlock()
+		if err == leveldb.ErrNotFound {
+			return nil, ErrNotFound
+		}
+		return nil, err
+	}
+
+	_, err = queue.lowincrement()
+	if err != nil {
+		queue.lowLock.Unlock()
+		return nil, err
+	}
+
+	err = queue.db.Delete(id2bytes(currentID), nil)
+	queue.lowLock.Unlock()
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// Close closes the queue
+func (queue *Queue) Close() error {
+	err := queue.db.Close()
+	queue.db = nil
+	return err
+}
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
deleted file mode 100644
index 583bdae62823..000000000000
--- a/vendor/github.com/pelletier/go-toml/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
deleted file mode 100644
index d5fd98c0211a..000000000000
--- a/vendor/github.com/pelletier/go-toml/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Package toml is a TOML parser and manipulation library.
-//
-// This version supports the specification as described in
-// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
-//
-// Marshaling
-//
-// Go-toml can marshal and unmarshal TOML documents from and to data
-// structures.
-//
-// TOML document as a tree
-//
-// Go-toml can operate on a TOML document as a tree. Use one of the Load*
-// functions to parse TOML data and obtain a Tree instance, then one of its
-// methods to manipulate the tree.
-//
-// JSONPath-like queries
-//
-// The package github.com/pelletier/go-toml/query implements a system
-// similar to JSONPath to quickly retrieve elements of a TOML document using a
-// single expression. See the package documentation for more information.
-// -package toml diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d3577..000000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index 284db64678b3..000000000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,85 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "bytes" - "errors" - "fmt" - "unicode" -) - -// Convert the bare key group string to an array. -// The input supports double quotation to allow "." inside the key name, -// but escape sequences are not supported. Lexers must unescape them beforehand. -func parseKey(key string) ([]string, error) { - groups := []string{} - var buffer bytes.Buffer - inQuotes := false - wasInQuotes := false - ignoreSpace := true - expectDot := false - - for _, char := range key { - if ignoreSpace { - if char == ' ' { - continue - } - ignoreSpace = false - } - switch char { - case '"': - if inQuotes { - groups = append(groups, buffer.String()) - buffer.Reset() - wasInQuotes = true - } - inQuotes = !inQuotes - expectDot = false - case '.': - if inQuotes { - buffer.WriteRune(char) - } else { - if !wasInQuotes { - if buffer.Len() == 0 { - return nil, errors.New("empty table key") - } - groups = append(groups, buffer.String()) - buffer.Reset() - } - ignoreSpace = true - expectDot = false - wasInQuotes = false - } - case ' ': - if inQuotes { - buffer.WriteRune(char) - } else { - expectDot = true - } - default: - if !inQuotes && !isValidBareChar(char) { - return nil, fmt.Errorf("invalid bare character: %c", char) - } - if !inQuotes && expectDot { - return nil, errors.New("what?") - } - buffer.WriteRune(char) - expectDot = false - } - } - if inQuotes { - return nil, errors.New("mismatched quotes") - } - if buffer.Len() > 0 { - groups = append(groups, buffer.String()) - } - if len(groups) == 0 { - return nil, errors.New("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index d11de428594c..000000000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,750 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var dateRegexp *regexp.Regexp - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - depth int - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if l.depth > 0 { - return l.lexRvalue - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - l.depth++ - return l.lexLeftBracket - case ']': - l.depth-- - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if l.depth == 0 { - return l.lexVoid - } - return l.lexRvalue - case '_': - return l.errorf("cannot start number with underscore") - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if 
l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - } - - if next == eof { - l.next() - break - } - - possibleDate := l.peekString(35) - dateMatch := dateRegexp.FindString(possibleDate) - if dateMatch != "" { - l.fastForward(len(dateMatch)) - return l.lexDate - } - - if next == '+' || next == '-' || isDigit(next) { - return l.lexNumber - } - - if isAlphanumeric(next) { - return l.lexKey - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - return l.lexRvalue -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - return l.lexRvalue -} - -func (l *tomlLexer) lexDate() tomlLexStateFn { - l.emit(tokenDate) - return l.lexRvalue -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - growingString := "" - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - growingString += str - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - growingString += str - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - break - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - growingString += string(r) - l.next() - } - l.emitWithValue(tokenKey, growingString) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - growingString := "" - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return growingString, nil - } - - next := l.peek() - if next == eof { - break - } - growingString += string(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - 
discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. -func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - growingString := "" - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return growingString, nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - growingString += "\"" - l.next() - case 'n': - growingString += "\n" - l.next() - case 'b': - growingString += "\b" - l.next() - case 'f': - growingString += "\f" - l.next() - case '/': - growingString += "/" - l.next() - case 't': - growingString += "\t" - l.next() - case 'r': - growingString += "\r" - l.next() - case '\\': - growingString += "\\" - l.next() - case 'u': - l.next() - code := "" - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code = code + string(c) - } - intcode, err := strconv.ParseInt(code, 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code) - } - growingString += string(rune(intcode)) - case 'U': - l.next() - code := "" - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code = code + string(c) - } - intcode, err := strconv.ParseInt(code, 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code) - } - growingString += string(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - growingString += string(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till 
"]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' 
{ - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -func init() { - dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 671da5564c30..000000000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,609 +0,0 @@ -package toml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" -) - -const tagKeyMultiline = "multiline" - -type tomlOpts struct { - name string - comment string - commented bool - multiline bool - include bool - omitempty bool -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - -// Check if the given marshall type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return mtype == timeType || isCustomMarshaler(mtype) - default: - return false - } -} - -// Check if the given marshall type maps to a Tree slice -func isTreeSlice(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Slice: - return !isOtherSlice(mtype) - default: - return false - } -} - -// Check if the given marshall type maps to a non-Tree slice -func isOtherSlice(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSlice(mtype.Elem()) - case reflect.Slice: - return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshall type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} 
- -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). - -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.Time time.Time{}, pointers to same -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. 
-// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Struct { - return []byte{}, errors.New("Only a struct can be marshaled to TOML") - } - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) - - return buf.Bytes(), err -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := newTree() - switch mtype.Kind() { - case reflect.Struct: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef) - if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - - tval.SetWithOptions(opts.name, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) - } - } - case reflect.Map: - for _, key := range mval.MapKeys() { - mvalf := mval.MapIndex(key) - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.Set(key.String(), val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isTreeSlice(mtype): - return e.valueToTreeSlice(mtype, mval) - case isOtherSlice(mtype): - return e.valueToOtherSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return 
mval.String(), nil - case reflect.Struct: - return mval.Interface().(time.Time), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. 
-func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") - } - - sval, err := d.valueFromTree(mtype.Elem(), d.tval) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) - } - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - mval = reflect.New(mtype).Elem() - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - opts := tomlOptions(mtypef) - if opts.include { - baseKey := opts.name - keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} - for _, key := range keysToTry { - exists := tval.Has(key) - if !exists { - continue - } - val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) - if err != nil { - return mval, formatError(err, tval.GetPosition(key)) - } - mval.Field(i).Set(mvalf) - break - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) - if err != nil { - return mval, formatError(err, tval.GetPosition(key)) - } - mval.SetMapIndex(reflect.ValueOf(key), mvalf) - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) - for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) - } - - switch tval.(type) { - case *Tree: - if isTree(mtype) { - return d.valueFromTree(mtype, tval.(*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSlice(mtype) { - return d.valueFromTreeSlice(mtype, tval.([]*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - if isOtherSlice(mtype) { - return d.valueFromOtherSlice(mtype, tval.([]interface{})) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - // if this passes for when mtype is 
reflect.Struct, tval is a time.Time - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if val.Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func tomlOptions(vf reflect.StructField) tomlOpts { - tag := vf.Tag.Get("toml") - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get("comment"); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) - multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) - result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: - return val.Len() == 0 - default: - return 
reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index 2d27599a9993..000000000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,430 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - default: - p.raiseError(tok, "unexpected token") - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) 
- } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVals := []string{key.val} - if len(keyVals) != 1 { - p.raiseError(key, "Invalid key") - } - keyVal := keyVals[0] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var numberUnderscoreInvalidRegexp *regexp.Regexp -var hexNumberUnderscoreInvalidRegexp *regexp.Regexp - -func numberContainsInvalidUnderscore(value string) error { - if numberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in number") - } - return nil -} - -func hexNumberContainsInvalidUnderscore(value string) error { - if hexNumberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in hex number") - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) - case 
'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) - case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) - default: - panic("invalid base") // the lexer should catch this first - } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) - } - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenDate: - val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - } - - p.raiseError(tok, "never reached") - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - value := p.parseRvalue() - tree.Set(key.val, value) - case tokenComma: - if previous == nil { - p.raiseError(follow, "inline table cannot start with a comma") - } - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(nil) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if arrayType == nil { - arrayType = reflect.TypeOf(val) - } - if reflect.TypeOf(val) != arrayType { - p.raiseError(follow, "mixed types in array") - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. 
- if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} - -func init() { - numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) - hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87baaa..000000000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index 1a9081346679..000000000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,144 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "unicode" -) - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenDate - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "Date", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) Int() int { - if result, err := strconv.Atoi(t.val); err != nil { - panic(err) - } else { - return result - } -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return unicode.IsLetter(r) || r == '_' -} - -func 
isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return unicode.IsNumber(r) -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 98c185ad0b8e..000000000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,367 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - position Position -} - -func newTree() *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: Position{}, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
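// A minimal usage sketch for the accessors above (table and key names are
// hypothetical, not part of this patch):
//
//	tree, _ := toml.Load("[server]\nport = 8080")
//	port := tree.Get("server.port")                  // int64(8080)
//	same := tree.GetPath([]string{"server", "port"}) // same value; also works for non-bare keys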
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
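// A hedged sketch of SetWithOptions (the key and comment are hypothetical):
// the options attach metadata that WriteTo/Marshal will render, e.g.
//
//	tree.SetWithOptions("server.port", SetOptions{Comment: "listen port"}, int64(8080))
//
// which would serialize as:
//
//	# listen port
//	port = 8080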
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch value.(type) { - case *Tree: - tt := value.(*Tree) - tt.comment = opts.Comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = opts.Comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch value.(type) { - case *Tree: - tt := value.(*Tree) - tt.comment = comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: comment, commented: commented} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for _, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTree() - tree.position = pos - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = errors.New(r.(string)) - } - }() - tree = parseToml(lexToml(b)) - return -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 79610e9b340c..000000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,142 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index e4049e29f2a1..000000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,333 
+0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n") - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - b.WriteString(`"`) - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. - tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Ensure a round float does contain a decimal point. Otherwise feeding - // the output back to the parser would convert to an integer. 
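// (Example: float64(2.0) must be rendered as "2.0", not "2", because "2"
// would re-parse as an integer.)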
- if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil - case string: - if tv.multiline { - return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ",") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - simpleValuesKeys := make([]string, 0) - complexValuesKeys := make([]string, 0) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - complexValuesKeys = append(complexValuesKeys, k) - default: - simpleValuesKeys = append(simpleValuesKeys, k) - } - } - - sort.Strings(simpleValuesKeys) - sort.Strings(complexValuesKeys) - - for _, k := range simpleValuesKeys { - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - - for _, k := range complexValuesKeys { - v := t.values[k] - - combinedKey := k - if keyspace != "" { - combinedKey = keyspace + "." 
+ combinedKey - } - var commented string - if t.commented { - commented = "# " - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - } - } - } - - return bytesCount, nil -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = node.value - } - } - return result -} diff --git a/vendor/github.com/siddontang/go/LICENSE b/vendor/github.com/siddontang/go/LICENSE deleted file mode 100644 index 80511a0a784d..000000000000 --- a/vendor/github.com/siddontang/go/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 siddontang - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/siddontang/go/bson/LICENSE b/vendor/github.com/siddontang/go/bson/LICENSE deleted file mode 100644 index 890326017b85..000000000000 --- a/vendor/github.com/siddontang/go/bson/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -BSON library for Go - -Copyright (c) 2010-2012 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/siddontang/go/filelock/LICENSE b/vendor/github.com/siddontang/go/filelock/LICENSE deleted file mode 100644 index fec05ce12959..000000000000 --- a/vendor/github.com/siddontang/go/filelock/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_generic.go b/vendor/github.com/siddontang/go/filelock/file_lock_generic.go deleted file mode 100644 index 53c292acbdff..000000000000 --- a/vendor/github.com/siddontang/go/filelock/file_lock_generic.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2012 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows - -package filelock - -import ( - "fmt" - "io" - "runtime" -) - -func Lock(name string) (io.Closer, error) { - return nil, fmt.Errorf("leveldb/db: file locking is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go b/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go deleted file mode 100644 index 56ff3e2ceef2..000000000000 --- a/vendor/github.com/siddontang/go/filelock/file_lock_solaris.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build solaris - -package filelock - -import ( - "io" - "os" - "syscall" -) - -// lockCloser hides all of an os.File's methods, except for Close. -type lockCloser struct { - f *os.File -} - -func (l lockCloser) Close() error { - return l.f.Close() -} - -func Lock(name string) (io.Closer, error) { - f, err := os.Create(name) - if err != nil { - return nil, err - } - - spec := syscall.Flock_t{ - Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), - Start: 0, - Len: 0, // 0 means to lock the entire file. - Pid: int32(os.Getpid()), - } - if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil { - f.Close() - return nil, err - } - - return lockCloser{f}, nil -} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_unix.go b/vendor/github.com/siddontang/go/filelock/file_lock_unix.go deleted file mode 100644 index f70ae6192c59..000000000000 --- a/vendor/github.com/siddontang/go/filelock/file_lock_unix.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2014 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux netbsd openbsd - -package filelock - -import ( - "io" - "os" - "syscall" -) - -// lockCloser hides all of an os.File's methods, except for Close. -type lockCloser struct { - f *os.File -} - -func (l lockCloser) Close() error { - return l.f.Close() -} - -func Lock(name string) (io.Closer, error) { - f, err := os.Create(name) - if err != nil { - return nil, err - } - - /* - Some people tell me FcntlFlock does not exist, so use flock here - */ - if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { - f.Close() - return nil, err - } - - // spec := syscall.Flock_t{ - // Type: syscall.F_WRLCK, - // Whence: int16(os.SEEK_SET), - // Start: 0, - // Len: 0, // 0 means to lock the entire file. - // Pid: int32(os.Getpid()), - // } - // if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil { - // f.Close() - // return nil, err - // } - - return lockCloser{f}, nil -} diff --git a/vendor/github.com/siddontang/go/filelock/file_lock_windows.go b/vendor/github.com/siddontang/go/filelock/file_lock_windows.go deleted file mode 100644 index 5d3e4ba2029a..000000000000 --- a/vendor/github.com/siddontang/go/filelock/file_lock_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2013 The LevelDB-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filelock - -import ( - "io" - "syscall" -) - -// lockCloser hides all of an syscall.Handle's methods, except for Close. 
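// A hedged usage sketch shared by all platform variants of this package
// (the lock-file path is hypothetical):
//
//	closer, err := filelock.Lock("/var/run/app.lock")
//	if err != nil {
//		return err // could not create or lock the file
//	}
//	defer closer.Close() // releases the lock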
-type lockCloser struct {
-	fd syscall.Handle
-}
-
-func (l lockCloser) Close() error {
-	return syscall.Close(l.fd)
-}
-
-func Lock(name string) (io.Closer, error) {
-	p, err := syscall.UTF16PtrFromString(name)
-	if err != nil {
-		return nil, err
-	}
-	fd, err := syscall.CreateFile(p,
-		syscall.GENERIC_READ|syscall.GENERIC_WRITE,
-		0, nil, syscall.CREATE_ALWAYS,
-		syscall.FILE_ATTRIBUTE_NORMAL,
-		0,
-	)
-	if err != nil {
-		return nil, err
-	}
-	return lockCloser{fd: fd}, nil
-}
diff --git a/vendor/github.com/siddontang/go/hack/hack.go b/vendor/github.com/siddontang/go/hack/hack.go
deleted file mode 100644
index 74ee83cbf5d5..000000000000
--- a/vendor/github.com/siddontang/go/hack/hack.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package hack
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-// String converts a byte slice to a string without copying;
-// use at your own risk
-func String(b []byte) (s string) {
-	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
-	pstring.Data = pbytes.Data
-	pstring.Len = pbytes.Len
-	return
-}
-
-// Slice converts a string to a byte slice without copying;
-// use at your own risk
-func Slice(s string) (b []byte) {
-	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
-	pbytes.Data = pstring.Data
-	pbytes.Len = pstring.Len
-	pbytes.Cap = pstring.Len
-	return
-}
diff --git a/vendor/github.com/siddontang/go/ioutil2/ioutil.go b/vendor/github.com/siddontang/go/ioutil2/ioutil.go
deleted file mode 100644
index 35c0ad3cad51..000000000000
--- a/vendor/github.com/siddontang/go/ioutil2/ioutil.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012, Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ioutil2
-
-import (
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-)
-
-// Write file to temp and atomically move when everything else succeeds.
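// A minimal sketch of the intended use (path, data and mode are hypothetical):
//
//	if err := ioutil2.WriteFileAtomic("conf/app.toml", data, 0644); err != nil {
//		return err
//	}
//
// Readers never observe a partially written file: the rename either installs
// the complete new content or leaves the old file in place.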
-func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
-	dir, name := path.Dir(filename), path.Base(filename)
-	f, err := ioutil.TempFile(dir, name)
-	if err != nil {
-		return err
-	}
-	n, err := f.Write(data)
-	f.Close()
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-	}
-	if err == nil {
-		// chmod only once the write has succeeded, so a write error is not masked
-		err = os.Chmod(f.Name(), perm)
-	}
-	if err != nil {
-		os.Remove(f.Name())
-		return err
-	}
-	return os.Rename(f.Name(), filename)
-}
-
-// FileExists reports whether the named file exists.
-func FileExists(name string) bool {
-	_, err := os.Stat(name)
-	return !os.IsNotExist(err)
-}
diff --git a/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go b/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go
deleted file mode 100644
index c02ab0d5fd1f..000000000000
--- a/vendor/github.com/siddontang/go/ioutil2/sectionwriter.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package ioutil2
-
-import (
-	"errors"
-	"io"
-)
-
-var ErrExceedLimit = errors.New("write exceed limit")
-
-func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
-	return &SectionWriter{w, off, off, off + n}
-}
-
-type SectionWriter struct {
-	w     io.WriterAt
-	base  int64
-	off   int64
-	limit int64
-}
-
-func (s *SectionWriter) Write(p []byte) (n int, err error) {
-	if s.off >= s.limit {
-		return 0, ErrExceedLimit
-	}
-
-	if max := s.limit - s.off; int64(len(p)) > max {
-		return 0, ErrExceedLimit
-	}
-
-	n, err = s.w.WriteAt(p, s.off)
-	s.off += int64(n)
-	return
-}
-
-var errWhence = errors.New("Seek: invalid whence")
-var errOffset = errors.New("Seek: invalid offset")
-
-func (s *SectionWriter) Seek(offset int64, whence int) (int64, error) {
-	switch whence {
-	default:
-		return 0, errWhence
-	case 0:
-		offset += s.base
-	case 1:
-		offset += s.off
-	case 2:
-		offset += s.limit
-	}
-	if offset < s.base {
-		return 0, errOffset
-	}
-	s.off = offset
-	return offset - s.base, nil
-}
-
-func (s *SectionWriter) WriteAt(p []byte, off int64) (n int, err error) {
-	if off < 0 || off >= s.limit-s.base {
-		return 0, errOffset
-	}
-	off += s.base
-	if max := s.limit - off; int64(len(p)) > max {
-		return 0, ErrExceedLimit
-	}
-
-	return s.w.WriteAt(p, off)
-}
-
-// Size returns the size of the section in bytes.
-func (s *SectionWriter) Size() int64 { return s.limit - s.base }
diff --git a/vendor/github.com/siddontang/go/log/doc.go b/vendor/github.com/siddontang/go/log/doc.go
deleted file mode 100644
index 81a60ee853bc..000000000000
--- a/vendor/github.com/siddontang/go/log/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// log package supplies more advanced features than the standard Go log package.
-//
-// It supports different log levels: trace, debug, info, warn, error, fatal.
-//
-// It also supports different log handlers, so you can log to stdout, a file, a socket, etc...
-//
-// Use
-//
-//  import "github.com/siddontang/go/log"
-//
-//  //log with different level
-//  log.Info("hello world")
-//  log.Error("hello world")
-//
-//  //create a logger with specified handler
-//  h := NewStreamHandler(os.Stdout)
-//  l := log.NewDefault(h)
-//  l.Info("hello world")
-//  l.Infof("%s %d", "hello", 123)
-//
-package log
diff --git a/vendor/github.com/siddontang/go/log/filehandler.go b/vendor/github.com/siddontang/go/log/filehandler.go
deleted file mode 100644
index 2c158e2cf569..000000000000
--- a/vendor/github.com/siddontang/go/log/filehandler.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package log
-
-import (
-	"fmt"
-	"os"
-	"path"
-	"time"
-)
-
-//FileHandler writes log to a file.
-type FileHandler struct {
-	fd *os.File
-}
-
-func NewFileHandler(fileName string, flag int) (*FileHandler, error) {
-	dir := path.Dir(fileName)
-	os.Mkdir(dir, 0777)
-
-	f, err := os.OpenFile(fileName, flag, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	h := new(FileHandler)
-
-	h.fd = f
-
-	return h, nil
-}
-
-func (h *FileHandler) Write(b []byte) (n int, err error) {
-	return h.fd.Write(b)
-}
-
-func (h *FileHandler) Close() error {
-	return h.fd.Close()
-}
-
-//RotatingFileHandler writes log to a file; if the file size exceeds maxBytes,
-//it will back up the current file and open a new one.
-//
-//The maximum number of backup files is set by backupCount; the oldest backup is deleted when there are too many.
-type RotatingFileHandler struct {
-	fd *os.File
-
-	fileName    string
-	maxBytes    int
-	curBytes    int
-	backupCount int
-}
-
-func NewRotatingFileHandler(fileName string, maxBytes int, backupCount int) (*RotatingFileHandler, error) {
-	dir := path.Dir(fileName)
-	os.MkdirAll(dir, 0777)
-
-	h := new(RotatingFileHandler)
-
-	if maxBytes <= 0 {
-		return nil, fmt.Errorf("invalid max bytes")
-	}
-
-	h.fileName = fileName
-	h.maxBytes = maxBytes
-	h.backupCount = backupCount
-
-	var err error
-	h.fd, err = os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err := h.fd.Stat()
-	if err != nil {
-		return nil, err
-	}
-	h.curBytes = int(f.Size())
-
-	return h, nil
-}
-
-func (h *RotatingFileHandler) Write(p []byte) (n int, err error) {
-	h.doRollover()
-	n, err = h.fd.Write(p)
-	h.curBytes += n
-	return
-}
-
-func (h *RotatingFileHandler) Close() error {
-	if h.fd != nil {
-		return h.fd.Close()
-	}
-	return nil
-}
-
-func (h *RotatingFileHandler) doRollover() {
-
-	if h.curBytes < h.maxBytes {
-		return
-	}
-
-	f, err := h.fd.Stat()
-	if err != nil {
-		return
-	}
-
-	if h.maxBytes <= 0 {
-		return
-	} else if f.Size() < int64(h.maxBytes) {
-		h.curBytes = int(f.Size())
-		return
-	}
-
-	if h.backupCount > 0 {
-		h.fd.Close()
-
-		for i := h.backupCount - 1; i > 0; i-- {
-			sfn := fmt.Sprintf("%s.%d", h.fileName, i)
-			dfn := fmt.Sprintf("%s.%d", h.fileName, i+1)
-
-			os.Rename(sfn, dfn)
-		}
-
-		dfn := fmt.Sprintf("%s.1", h.fileName)
-		os.Rename(h.fileName, dfn)
-
-		h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
-		h.curBytes = 0
-		f, err := h.fd.Stat()
-		if err != nil {
-			return
-		}
-		h.curBytes = int(f.Size())
-	}
-}
-
-//TimeRotatingFileHandler writes log to a file;
-//it will back up the current file and open a new one after a period of time you specify.
-//
-//refer: http://docs.python.org/2/library/logging.handlers.html.
-//similar to Python's TimedRotatingFileHandler.
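// A hedged example of time-based rotation, assuming the When* constants
// defined below (the log path is hypothetical):
//
//	h, err := log.NewTimeRotatingFileHandler("/var/log/app.log", log.WhenDay, 1)
//	if err != nil {
//		return err
//	}
//	l := log.NewDefault(h) // rotates the file once per day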
-type TimeRotatingFileHandler struct { - fd *os.File - - baseName string - interval int64 - suffix string - rolloverAt int64 -} - -const ( - WhenSecond = iota - WhenMinute - WhenHour - WhenDay -) - -func NewTimeRotatingFileHandler(baseName string, when int8, interval int) (*TimeRotatingFileHandler, error) { - dir := path.Dir(baseName) - os.Mkdir(dir, 0777) - - h := new(TimeRotatingFileHandler) - - h.baseName = baseName - - switch when { - case WhenSecond: - h.interval = 1 - h.suffix = "2006-01-02_15-04-05" - case WhenMinute: - h.interval = 60 - h.suffix = "2006-01-02_15-04" - case WhenHour: - h.interval = 3600 - h.suffix = "2006-01-02_15" - case WhenDay: - h.interval = 3600 * 24 - h.suffix = "2006-01-02" - default: - return nil, fmt.Errorf("invalid when_rotate: %d", when) - } - - h.interval = h.interval * int64(interval) - - var err error - h.fd, err = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - if err != nil { - return nil, err - } - - fInfo, _ := h.fd.Stat() - h.rolloverAt = fInfo.ModTime().Unix() + h.interval - - return h, nil -} - -func (h *TimeRotatingFileHandler) doRollover() { - //refer http://hg.python.org/cpython/file/2.7/Lib/logging/handlers.py - now := time.Now() - - if h.rolloverAt <= now.Unix() { - fName := h.baseName + now.Format(h.suffix) - h.fd.Close() - e := os.Rename(h.baseName, fName) - if e != nil { - panic(e) - } - - h.fd, _ = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - - h.rolloverAt = time.Now().Unix() + h.interval - } -} - -func (h *TimeRotatingFileHandler) Write(b []byte) (n int, err error) { - h.doRollover() - return h.fd.Write(b) -} - -func (h *TimeRotatingFileHandler) Close() error { - return h.fd.Close() -} diff --git a/vendor/github.com/siddontang/go/log/handler.go b/vendor/github.com/siddontang/go/log/handler.go deleted file mode 100644 index 4dc086f45c0a..000000000000 --- a/vendor/github.com/siddontang/go/log/handler.go +++ /dev/null @@ -1,48 +0,0 @@ -package log - -import ( - "io" -) - -//Handler writes logs to somewhere -type Handler interface { - Write(p []byte) (n int, err error) - Close() error -} - -//StreamHandler writes logs to a specified io Writer, maybe stdout, stderr, etc... -type StreamHandler struct { - w io.Writer -} - -func NewStreamHandler(w io.Writer) (*StreamHandler, error) { - h := new(StreamHandler) - - h.w = w - - return h, nil -} - -func (h *StreamHandler) Write(b []byte) (n int, err error) { - return h.w.Write(b) -} - -func (h *StreamHandler) Close() error { - return nil -} - -//NullHandler does nothing, it discards anything. -type NullHandler struct { -} - -func NewNullHandler() (*NullHandler, error) { - return new(NullHandler), nil -} - -func (h *NullHandler) Write(b []byte) (n int, err error) { - return len(b), nil -} - -func (h *NullHandler) Close() { - -} diff --git a/vendor/github.com/siddontang/go/log/log.go b/vendor/github.com/siddontang/go/log/log.go deleted file mode 100644 index 371f6016871e..000000000000 --- a/vendor/github.com/siddontang/go/log/log.go +++ /dev/null @@ -1,343 +0,0 @@ -package log - -import ( - "fmt" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -//log level, from low to high, more high means more serious -const ( - LevelTrace = iota - LevelDebug - LevelInfo - LevelWarn - LevelError - LevelFatal -) - -const ( - Ltime = 1 << iota //time format "2006/01/02 15:04:05" - Lfile //file.go:123 - Llevel //[Trace|Debug|Info...] 
-) - -var LevelName [6]string = [6]string{"Trace", "Debug", "Info", "Warn", "Error", "Fatal"} - -const TimeFormat = "2006/01/02 15:04:05" - -const maxBufPoolSize = 16 - -type atomicInt32 int32 - -func (i *atomicInt32) Set(n int) { - atomic.StoreInt32((*int32)(i), int32(n)) -} - -func (i *atomicInt32) Get() int { - return int(atomic.LoadInt32((*int32)(i))) -} - -type Logger struct { - level atomicInt32 - flag int - - hMutex sync.Mutex - handler Handler - - bufMutex sync.Mutex - bufs [][]byte - - closed atomicInt32 -} - -//new a logger with specified handler and flag -func New(handler Handler, flag int) *Logger { - var l = new(Logger) - - l.level.Set(LevelInfo) - l.handler = handler - - l.flag = flag - - l.closed.Set(0) - - l.bufs = make([][]byte, 0, 16) - - return l -} - -//new a default logger with specified handler and flag: Ltime|Lfile|Llevel -func NewDefault(handler Handler) *Logger { - return New(handler, Ltime|Lfile|Llevel) -} - -func newStdHandler() *StreamHandler { - h, _ := NewStreamHandler(os.Stdout) - return h -} - -var std = NewDefault(newStdHandler()) - -func (l *Logger) popBuf() []byte { - l.bufMutex.Lock() - var buf []byte - if len(l.bufs) == 0 { - buf = make([]byte, 0, 1024) - } else { - buf = l.bufs[len(l.bufs)-1] - l.bufs = l.bufs[0 : len(l.bufs)-1] - } - l.bufMutex.Unlock() - - return buf -} - -func (l *Logger) putBuf(buf []byte) { - l.bufMutex.Lock() - if len(l.bufs) < maxBufPoolSize { - buf = buf[0:0] - l.bufs = append(l.bufs, buf) - } - l.bufMutex.Unlock() -} - -func (l *Logger) Close() { - if l.closed.Get() == 1 { - return - } - l.closed.Set(1) - - l.handler.Close() -} - -//set log level, any log level less than it will not log -func (l *Logger) SetLevel(level int) { - l.level.Set(level) -} - -// name can be in ["trace", "debug", "info", "warn", "error", "fatal"] -func (l *Logger) SetLevelByName(name string) { - name = strings.ToLower(name) - switch name { - case "trace": - l.SetLevel(LevelTrace) - case "debug": - l.SetLevel(LevelDebug) - case "info": - l.SetLevel(LevelInfo) - case "warn": - l.SetLevel(LevelWarn) - case "error": - l.SetLevel(LevelError) - case "fatal": - l.SetLevel(LevelFatal) - } -} - -func (l *Logger) SetHandler(h Handler) { - if l.closed.Get() == 1 { - return - } - - l.hMutex.Lock() - if l.handler != nil { - l.handler.Close() - } - l.handler = h - l.hMutex.Unlock() -} - -func (l *Logger) Output(callDepth int, level int, format string, v ...interface{}) { - if l.closed.Get() == 1 { - // closed - return - } - - if l.level.Get() > level { - // higher level can be logged - return - } - - var s string - if format == "" { - s = fmt.Sprint(v...) - } else { - s = fmt.Sprintf(format, v...) - } - - buf := l.popBuf() - - if l.flag&Ltime > 0 { - now := time.Now().Format(TimeFormat) - buf = append(buf, '[') - buf = append(buf, now...) - buf = append(buf, "] "...) - } - - if l.flag&Lfile > 0 { - _, file, line, ok := runtime.Caller(callDepth) - if !ok { - file = "???" - line = 0 - } else { - for i := len(file) - 1; i > 0; i-- { - if file[i] == '/' { - file = file[i+1:] - break - } - } - } - - buf = append(buf, file...) - buf = append(buf, ':') - - buf = strconv.AppendInt(buf, int64(line), 10) - buf = append(buf, ' ') - } - - if l.flag&Llevel > 0 { - buf = append(buf, '[') - buf = append(buf, LevelName[level]...) - buf = append(buf, "] "...) - } - - buf = append(buf, s...) 
-
-	// append a trailing newline if missing (also guards the empty-message case)
-	if len(s) == 0 || s[len(s)-1] != '\n' {
-		buf = append(buf, '\n')
-	}
-
-	// l.msg <- buf
-
-	l.hMutex.Lock()
-	l.handler.Write(buf)
-	l.hMutex.Unlock()
-	l.putBuf(buf)
-}
-
-//log with Trace level
-func (l *Logger) Trace(v ...interface{}) {
-	l.Output(2, LevelTrace, "", v...)
-}
-
-//log with Debug level
-func (l *Logger) Debug(v ...interface{}) {
-	l.Output(2, LevelDebug, "", v...)
-}
-
-//log with info level
-func (l *Logger) Info(v ...interface{}) {
-	l.Output(2, LevelInfo, "", v...)
-}
-
-//log with warn level
-func (l *Logger) Warn(v ...interface{}) {
-	l.Output(2, LevelWarn, "", v...)
-}
-
-//log with error level
-func (l *Logger) Error(v ...interface{}) {
-	l.Output(2, LevelError, "", v...)
-}
-
-//log with fatal level
-func (l *Logger) Fatal(v ...interface{}) {
-	l.Output(2, LevelFatal, "", v...)
-}
-
-//log with Trace level
-func (l *Logger) Tracef(format string, v ...interface{}) {
-	l.Output(2, LevelTrace, format, v...)
-}
-
-//log with Debug level
-func (l *Logger) Debugf(format string, v ...interface{}) {
-	l.Output(2, LevelDebug, format, v...)
-}
-
-//log with info level
-func (l *Logger) Infof(format string, v ...interface{}) {
-	l.Output(2, LevelInfo, format, v...)
-}
-
-//log with warn level
-func (l *Logger) Warnf(format string, v ...interface{}) {
-	l.Output(2, LevelWarn, format, v...)
-}
-
-//log with error level
-func (l *Logger) Errorf(format string, v ...interface{}) {
-	l.Output(2, LevelError, format, v...)
-}
-
-//log with fatal level
-func (l *Logger) Fatalf(format string, v ...interface{}) {
-	l.Output(2, LevelFatal, format, v...)
-}
-
-func SetLevel(level int) {
-	std.SetLevel(level)
-}
-
-// name can be in ["trace", "debug", "info", "warn", "error", "fatal"]
-func SetLevelByName(name string) {
-	std.SetLevelByName(name)
-}
-
-func SetHandler(h Handler) {
-	std.SetHandler(h)
-}
-
-func Trace(v ...interface{}) {
-	std.Output(2, LevelTrace, "", v...)
-}
-
-func Debug(v ...interface{}) {
-	std.Output(2, LevelDebug, "", v...)
-}
-
-func Info(v ...interface{}) {
-	std.Output(2, LevelInfo, "", v...)
-}
-
-func Warn(v ...interface{}) {
-	std.Output(2, LevelWarn, "", v...)
-}
-
-func Error(v ...interface{}) {
-	std.Output(2, LevelError, "", v...)
-}
-
-func Fatal(v ...interface{}) {
-	std.Output(2, LevelFatal, "", v...)
-}
-
-func Tracef(format string, v ...interface{}) {
-	std.Output(2, LevelTrace, format, v...)
-}
-
-func Debugf(format string, v ...interface{}) {
-	std.Output(2, LevelDebug, format, v...)
-}
-
-func Infof(format string, v ...interface{}) {
-	std.Output(2, LevelInfo, format, v...)
-}
-
-func Warnf(format string, v ...interface{}) {
-	std.Output(2, LevelWarn, format, v...)
-}
-
-func Errorf(format string, v ...interface{}) {
-	std.Output(2, LevelError, format, v...)
-}
-
-func Fatalf(format string, v ...interface{}) {
-	std.Output(2, LevelFatal, format, v...)
-}
diff --git a/vendor/github.com/siddontang/go/log/sockethandler.go b/vendor/github.com/siddontang/go/log/sockethandler.go
deleted file mode 100644
index 3e7494d9501b..000000000000
--- a/vendor/github.com/siddontang/go/log/sockethandler.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package log
-
-import (
-	"encoding/binary"
-	"net"
-	"time"
-)
-
-//SocketHandler writes log to a connection.
-//Network protocol is simple: log length + log | log length + log. log length is uint32, bigendian.
-//you must implement your own log server; for a simple setup you can use logd instead.
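// Sketch of the receiving side implied by the framing above (not part of
// this package): read a big-endian uint32 length, then that many bytes.
//
//	var hdr [4]byte
//	if _, err := io.ReadFull(conn, hdr[:]); err != nil {
//		return err
//	}
//	msg := make([]byte, binary.BigEndian.Uint32(hdr[:]))
//	if _, err := io.ReadFull(conn, msg); err != nil {
//		return err
//	}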
-type SocketHandler struct { - c net.Conn - protocol string - addr string -} - -func NewSocketHandler(protocol string, addr string) (*SocketHandler, error) { - s := new(SocketHandler) - - s.protocol = protocol - s.addr = addr - - return s, nil -} - -func (h *SocketHandler) Write(p []byte) (n int, err error) { - if err = h.connect(); err != nil { - return - } - - buf := make([]byte, len(p)+4) - - binary.BigEndian.PutUint32(buf, uint32(len(p))) - - copy(buf[4:], p) - - n, err = h.c.Write(buf) - if err != nil { - h.c.Close() - h.c = nil - } - return -} - -func (h *SocketHandler) Close() error { - if h.c != nil { - h.c.Close() - } - return nil -} - -func (h *SocketHandler) connect() error { - if h.c != nil { - return nil - } - - var err error - h.c, err = net.DialTimeout(h.protocol, h.addr, 20*time.Second) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/siddontang/go/num/bytes.go b/vendor/github.com/siddontang/go/num/bytes.go deleted file mode 100644 index 1f3def74ac73..000000000000 --- a/vendor/github.com/siddontang/go/num/bytes.go +++ /dev/null @@ -1,67 +0,0 @@ -package num - -import ( - "encoding/binary" -) - -//all are bigendian format - -func BytesToUint16(b []byte) uint16 { - return binary.BigEndian.Uint16(b) -} - -func Uint16ToBytes(u uint16) []byte { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, u) - return buf -} - -func BytesToUint32(b []byte) uint32 { - return binary.BigEndian.Uint32(b) -} - -func Uint32ToBytes(u uint32) []byte { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, u) - return buf -} - -func BytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -func Uint64ToBytes(u uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, u) - return buf -} - -func BytesToInt16(b []byte) int16 { - return int16(binary.BigEndian.Uint16(b)) -} - -func Int16ToBytes(u int16) []byte { - buf := make([]byte, 2) - binary.BigEndian.PutUint16(buf, uint16(u)) - return buf -} - -func BytesToInt32(b []byte) int32 { - return int32(binary.BigEndian.Uint32(b)) -} - -func Int32ToBytes(u int32) []byte { - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(u)) - return buf -} - -func BytesToInt64(b []byte) int64 { - return int64(binary.BigEndian.Uint64(b)) -} - -func Int64ToBytes(u int64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(u)) - return buf -} diff --git a/vendor/github.com/siddontang/go/num/cmp.go b/vendor/github.com/siddontang/go/num/cmp.go deleted file mode 100644 index 78f8d4f14af3..000000000000 --- a/vendor/github.com/siddontang/go/num/cmp.go +++ /dev/null @@ -1,161 +0,0 @@ -package num - -func MinUint(a uint, b uint) uint { - if a > b { - return b - } else { - return a - } -} - -func MaxUint(a uint, b uint) uint { - if a > b { - return a - } else { - return b - } -} - -func MinInt(a int, b int) int { - if a > b { - return b - } else { - return a - } -} - -func MaxInt(a int, b int) int { - if a > b { - return a - } else { - return b - } -} - -func MinUint8(a uint8, b uint8) uint8 { - if a > b { - return b - } else { - return a - } -} - -func MaxUint8(a uint8, b uint8) uint8 { - if a > b { - return a - } else { - return b - } -} - -func MinInt8(a int8, b int8) int8 { - if a > b { - return b - } else { - return a - } -} - -func MaxInt8(a int8, b int8) int8 { - if a > b { - return a - } else { - return b - } -} - -func MinUint16(a uint16, b uint16) uint16 { - if a > b { - return b - } else { - return a - } -} - -func MaxUint16(a uint16, b uint16) uint16 { - 
if a > b { - return a - } else { - return b - } -} - -func MinInt16(a int16, b int16) int16 { - if a > b { - return b - } else { - return a - } -} - -func MaxInt16(a int16, b int16) int16 { - if a > b { - return a - } else { - return b - } -} - -func MinUint32(a uint32, b uint32) uint32 { - if a > b { - return b - } else { - return a - } -} - -func MaxUint32(a uint32, b uint32) uint32 { - if a > b { - return a - } else { - return b - } -} - -func MinInt32(a int32, b int32) int32 { - if a > b { - return b - } else { - return a - } -} - -func MaxInt32(a int32, b int32) int32 { - if a > b { - return a - } else { - return b - } -} - -func MinUint64(a uint64, b uint64) uint64 { - if a > b { - return b - } else { - return a - } -} - -func MaxUint64(a uint64, b uint64) uint64 { - if a > b { - return a - } else { - return b - } -} - -func MinInt64(a int64, b int64) int64 { - if a > b { - return b - } else { - return a - } -} - -func MaxInt64(a int64, b int64) int64 { - if a > b { - return a - } else { - return b - } -} diff --git a/vendor/github.com/siddontang/go/num/str.go b/vendor/github.com/siddontang/go/num/str.go deleted file mode 100644 index 4b304817b86d..000000000000 --- a/vendor/github.com/siddontang/go/num/str.go +++ /dev/null @@ -1,157 +0,0 @@ -package num - -import ( - "strconv" -) - -func ParseUint(s string) (uint, error) { - if v, err := strconv.ParseUint(s, 10, 0); err != nil { - return 0, err - } else { - return uint(v), nil - } -} - -func ParseUint8(s string) (uint8, error) { - if v, err := strconv.ParseUint(s, 10, 8); err != nil { - return 0, err - } else { - return uint8(v), nil - } -} - -func ParseUint16(s string) (uint16, error) { - if v, err := strconv.ParseUint(s, 10, 16); err != nil { - return 0, err - } else { - return uint16(v), nil - } -} - -func ParseUint32(s string) (uint32, error) { - if v, err := strconv.ParseUint(s, 10, 32); err != nil { - return 0, err - } else { - return uint32(v), nil - } -} - -func ParseUint64(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) -} - -func ParseInt(s string) (int, error) { - if v, err := strconv.ParseInt(s, 10, 0); err != nil { - return 0, err - } else { - return int(v), nil - } -} - -func ParseInt8(s string) (int8, error) { - if v, err := strconv.ParseInt(s, 10, 8); err != nil { - return 0, err - } else { - return int8(v), nil - } -} - -func ParseInt16(s string) (int16, error) { - if v, err := strconv.ParseInt(s, 10, 16); err != nil { - return 0, err - } else { - return int16(v), nil - } -} - -func ParseInt32(s string) (int32, error) { - if v, err := strconv.ParseInt(s, 10, 32); err != nil { - return 0, err - } else { - return int32(v), nil - } -} - -func ParseInt64(s string) (int64, error) { - return strconv.ParseInt(s, 10, 64) -} - -func FormatInt(v int) string { - return strconv.FormatInt(int64(v), 10) -} - -func FormatInt8(v int8) string { - return strconv.FormatInt(int64(v), 10) -} - -func FormatInt16(v int16) string { - return strconv.FormatInt(int64(v), 10) -} - -func FormatInt32(v int32) string { - return strconv.FormatInt(int64(v), 10) -} - -func FormatInt64(v int64) string { - return strconv.FormatInt(int64(v), 10) -} - -func FormatUint(v uint) string { - return strconv.FormatUint(uint64(v), 10) -} - -func FormatUint8(v uint8) string { - return strconv.FormatUint(uint64(v), 10) -} - -func FormatUint16(v uint16) string { - return strconv.FormatUint(uint64(v), 10) -} - -func FormatUint32(v uint32) string { - return strconv.FormatUint(uint64(v), 10) -} - -func FormatUint64(v uint64) string { - return 
strconv.FormatUint(uint64(v), 10) -} - -func FormatIntToSlice(v int) []byte { - return strconv.AppendInt(nil, int64(v), 10) -} - -func FormatInt8ToSlice(v int8) []byte { - return strconv.AppendInt(nil, int64(v), 10) -} - -func FormatInt16ToSlice(v int16) []byte { - return strconv.AppendInt(nil, int64(v), 10) -} - -func FormatInt32ToSlice(v int32) []byte { - return strconv.AppendInt(nil, int64(v), 10) -} - -func FormatInt64ToSlice(v int64) []byte { - return strconv.AppendInt(nil, int64(v), 10) -} - -func FormatUintToSlice(v uint) []byte { - return strconv.AppendUint(nil, uint64(v), 10) -} - -func FormatUint8ToSlice(v uint8) []byte { - return strconv.AppendUint(nil, uint64(v), 10) -} - -func FormatUint16ToSlice(v uint16) []byte { - return strconv.AppendUint(nil, uint64(v), 10) -} - -func FormatUint32ToSlice(v uint32) []byte { - return strconv.AppendUint(nil, uint64(v), 10) -} - -func FormatUint64ToSlice(v uint64) []byte { - return strconv.AppendUint(nil, uint64(v), 10) -} diff --git a/vendor/github.com/siddontang/go/snappy/LICENSE b/vendor/github.com/siddontang/go/snappy/LICENSE deleted file mode 100644 index 6050c10f4c8b..000000000000 --- a/vendor/github.com/siddontang/go/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/siddontang/go/snappy/decode.go b/vendor/github.com/siddontang/go/snappy/decode.go deleted file mode 100644 index d93c1b9dbfd7..000000000000 --- a/vendor/github.com/siddontang/go/snappy/decode.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" -) - -// ErrCorrupt reports that the input is invalid. -var ErrCorrupt = errors.New("snappy: corrupt input") - -// DecodedLen returns the length of the decoded block. 
-func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n == 0 { - return 0, 0, ErrCorrupt - } - if uint64(int(v)) != v { - return 0, 0, errors.New("snappy: decoded block is too large") - } - return int(v), n, nil -} - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if len(dst) < dLen { - dst = make([]byte, dLen) - } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s += 1 - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt - } - return dst[:d], nil -} diff --git a/vendor/github.com/siddontang/go/snappy/encode.go b/vendor/github.com/siddontang/go/snappy/encode.go deleted file mode 100644 index b2371db11c8f..000000000000 --- a/vendor/github.com/siddontang/go/snappy/encode.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" -) - -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. 
-func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. -func Encode(dst, src []byte) ([]byte, error) { - if n := MaxEncodedLen(len(src)); len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d], nil - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) - } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ - } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. 
- if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) - } - return dst[:d], nil -} - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -func MaxEncodedLen(srcLen int) int { - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 -} diff --git a/vendor/github.com/siddontang/go/snappy/snappy.go b/vendor/github.com/siddontang/go/snappy/snappy.go deleted file mode 100644 index 2f1b790d0b71..000000000000 --- a/vendor/github.com/siddontang/go/snappy/snappy.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the snappy block-based compression format. -// It aims for very high speeds and reasonable compression. -// -// The C++ snappy implementation is at http://code.google.com/p/snappy/ -package snappy - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) diff --git a/vendor/github.com/siddontang/go/sync2/atomic.go b/vendor/github.com/siddontang/go/sync2/atomic.go deleted file mode 100644 index 382fc20dfec7..000000000000 --- a/vendor/github.com/siddontang/go/sync2/atomic.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013, Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sync2 - -import ( - "sync" - "sync/atomic" - "time" -) - -type AtomicInt32 int32 - -func (i *AtomicInt32) Add(n int32) int32 { - return atomic.AddInt32((*int32)(i), n) -} - -func (i *AtomicInt32) Set(n int32) { - atomic.StoreInt32((*int32)(i), n) -} - -func (i *AtomicInt32) Get() int32 { - return atomic.LoadInt32((*int32)(i)) -} - -func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) { - return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval) -} - -type AtomicUint32 uint32 - -func (i *AtomicUint32) Add(n uint32) uint32 { - return atomic.AddUint32((*uint32)(i), n) -} - -func (i *AtomicUint32) Set(n uint32) { - atomic.StoreUint32((*uint32)(i), n) -} - -func (i *AtomicUint32) Get() uint32 { - return atomic.LoadUint32((*uint32)(i)) -} - -func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) { - return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval) -} - -type AtomicInt64 int64 - -func (i *AtomicInt64) Add(n int64) int64 { - return atomic.AddInt64((*int64)(i), n) -} - -func (i *AtomicInt64) Set(n int64) { - atomic.StoreInt64((*int64)(i), n) -} - -func (i *AtomicInt64) Get() int64 { - return atomic.LoadInt64((*int64)(i)) -} - -func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) { - return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval) -} - -type AtomicUint64 uint64 - -func (i *AtomicUint64) Add(n uint64) uint64 { - return atomic.AddUint64((*uint64)(i), n) -} - -func (i *AtomicUint64) Set(n uint64) { - atomic.StoreUint64((*uint64)(i), n) -} - -func (i *AtomicUint64) Get() uint64 { - return atomic.LoadUint64((*uint64)(i)) -} - -func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) { - return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval) -} - -type AtomicDuration int64 - -func (d *AtomicDuration) Add(duration time.Duration) time.Duration { - return time.Duration(atomic.AddInt64((*int64)(d), int64(duration))) -} - -func (d *AtomicDuration) Set(duration time.Duration) { - atomic.StoreInt64((*int64)(d), int64(duration)) -} - -func (d *AtomicDuration) Get() time.Duration { - return time.Duration(atomic.LoadInt64((*int64)(d))) -} - -func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) { - return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval)) -} - -// AtomicString gives you atomic-style APIs for string, but -// it's only a convenience wrapper that uses a mutex. So, it's -// not as efficient as the rest of the atomic types. 
-type AtomicString struct { - mu sync.Mutex - str string -} - -func (s *AtomicString) Set(str string) { - s.mu.Lock() - s.str = str - s.mu.Unlock() -} - -func (s *AtomicString) Get() string { - s.mu.Lock() - str := s.str - s.mu.Unlock() - return str -} - -func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) { - s.mu.Lock() - defer s.mu.Unlock() - if s.str == oldval { - s.str = newval - return true - } - return false -} - -type AtomicBool int32 - -func (b *AtomicBool) Set(v bool) { - if v { - atomic.StoreInt32((*int32)(b), 1) - } else { - atomic.StoreInt32((*int32)(b), 0) - } -} - -func (b *AtomicBool) Get() bool { - return atomic.LoadInt32((*int32)(b)) == 1 -} diff --git a/vendor/github.com/siddontang/go/sync2/semaphore.go b/vendor/github.com/siddontang/go/sync2/semaphore.go deleted file mode 100644 index d310da7294c7..000000000000 --- a/vendor/github.com/siddontang/go/sync2/semaphore.go +++ /dev/null @@ -1,65 +0,0 @@ -package sync2 - -import ( - "sync" - "sync/atomic" - "time" -) - -func NewSemaphore(initialCount int) *Semaphore { - res := &Semaphore{ - counter: int64(initialCount), - } - res.cond.L = &res.lock - return res -} - -type Semaphore struct { - lock sync.Mutex - cond sync.Cond - counter int64 -} - -func (s *Semaphore) Release() { - s.lock.Lock() - s.counter += 1 - if s.counter >= 0 { - s.cond.Signal() - } - s.lock.Unlock() -} - -func (s *Semaphore) Acquire() { - s.lock.Lock() - for s.counter < 1 { - s.cond.Wait() - } - s.counter -= 1 - s.lock.Unlock() -} - -func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool { - done := make(chan bool, 1) - // Gate used to communicate between the threads and decide what the result - // is. If the main thread decides, we have timed out, otherwise we succeed. - decided := new(int32) - go func() { - s.Acquire() - if atomic.SwapInt32(decided, 1) == 0 { - done <- true - } else { - // If we already decided the result, and this thread did not win - s.Release() - } - }() - select { - case <-done: - return true - case <-time.NewTimer(timeout).C: - if atomic.SwapInt32(decided, 1) == 1 { - // The other thread already decided the result - return true - } - return false - } -} diff --git a/vendor/github.com/siddontang/ledisdb/config/config.go b/vendor/github.com/siddontang/ledisdb/config/config.go deleted file mode 100644 index f8aa63940146..000000000000 --- a/vendor/github.com/siddontang/ledisdb/config/config.go +++ /dev/null @@ -1,315 +0,0 @@ -package config - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "sync" - - "fmt" - - "github.com/pelletier/go-toml" - "github.com/siddontang/go/ioutil2" -) - -var ( - ErrNoConfigFile = errors.New("Running without a config file") -) - -const ( - DefaultAddr string = "127.0.0.1:6380" - - DefaultDBName string = "goleveldb" - - DefaultDataDir string = "./var" - - KB int = 1024 - MB int = KB * 1024 - GB int = MB * 1024 -) - -type LevelDBConfig struct { - Compression bool `toml:"compression"` - BlockSize int `toml:"block_size"` - WriteBufferSize int `toml:"write_buffer_size"` - CacheSize int `toml:"cache_size"` - MaxOpenFiles int `toml:"max_open_files"` - MaxFileSize int `toml:"max_file_size"` -} - -type RocksDBConfig struct { - Compression int `toml:"compression"` - BlockSize int `toml:"block_size"` - WriteBufferSize int `toml:"write_buffer_size"` - CacheSize int `toml:"cache_size"` - MaxOpenFiles int `toml:"max_open_files"` - MaxWriteBufferNum int `toml:"max_write_buffer_num"` - MinWriteBufferNumberToMerge int `toml:"min_write_buffer_number_to_merge"` - NumLevels int 
`toml:"num_levels"` - Level0FileNumCompactionTrigger int `toml:"level0_file_num_compaction_trigger"` - Level0SlowdownWritesTrigger int `toml:"level0_slowdown_writes_trigger"` - Level0StopWritesTrigger int `toml:"level0_stop_writes_trigger"` - TargetFileSizeBase int `toml:"target_file_size_base"` - TargetFileSizeMultiplier int `toml:"target_file_size_multiplier"` - MaxBytesForLevelBase int `toml:"max_bytes_for_level_base"` - MaxBytesForLevelMultiplier int `toml:"max_bytes_for_level_multiplier"` - DisableAutoCompactions bool `toml:"disable_auto_compactions"` - UseFsync bool `toml:"use_fsync"` - MaxBackgroundCompactions int `toml:"max_background_compactions"` - MaxBackgroundFlushes int `toml:"max_background_flushes"` - EnableStatistics bool `toml:"enable_statistics"` - StatsDumpPeriodSec int `toml:"stats_dump_period_sec"` - BackgroundThreads int `toml:"background_theads"` - HighPriorityBackgroundThreads int `toml:"high_priority_background_threads"` - DisableWAL bool `toml:"disable_wal"` - MaxManifestFileSize int `toml:"max_manifest_file_size"` -} - -type LMDBConfig struct { - MapSize int `toml:"map_size"` - NoSync bool `toml:"nosync"` -} - -type ReplicationConfig struct { - Path string `toml:"path"` - Sync bool `toml:"sync"` - WaitSyncTime int `toml:"wait_sync_time"` - WaitMaxSlaveAcks int `toml:"wait_max_slave_acks"` - ExpiredLogDays int `toml:"expired_log_days"` - StoreName string `toml:"store_name"` - MaxLogFileSize int64 `toml:"max_log_file_size"` - MaxLogFileNum int `toml:"max_log_file_num"` - SyncLog int `toml:"sync_log"` - Compression bool `toml:"compression"` - UseMmap bool `toml:"use_mmap"` -} - -type SnapshotConfig struct { - Path string `toml:"path"` - MaxNum int `toml:"max_num"` -} - -type TLS struct { - Enabled bool `toml:"enabled"` - Certificate string `toml:"certificate"` - Key string `toml:"key"` -} - -type AuthMethod func(c *Config, password string) bool - -type Config struct { - m sync.RWMutex `toml:"-"` - - AuthPassword string `toml:"auth_password"` - - //AuthMethod custom authentication method - AuthMethod AuthMethod `toml:"-"` - - FileName string `toml:"-"` - - // Addr can be empty to assign a local address dynamically - Addr string `toml:"addr"` - - AddrUnixSocketPerm string `toml:"addr_unixsocketperm"` - - HttpAddr string `toml:"http_addr"` - - SlaveOf string `toml:"slaveof"` - - Readonly bool `toml:readonly` - - DataDir string `toml:"data_dir"` - - Databases int `toml:"databases"` - - DBName string `toml:"db_name"` - DBPath string `toml:"db_path"` - DBSyncCommit int `toml:"db_sync_commit"` - - LevelDB LevelDBConfig `toml:"leveldb"` - RocksDB RocksDBConfig `toml:"rocksdb"` - - LMDB LMDBConfig `toml:"lmdb"` - - AccessLog string `toml:"access_log"` - - UseReplication bool `toml:"use_replication"` - Replication ReplicationConfig `toml:"replication"` - - Snapshot SnapshotConfig `toml:"snapshot"` - - ConnReadBufferSize int `toml:"conn_read_buffer_size"` - ConnWriteBufferSize int `toml:"conn_write_buffer_size"` - ConnKeepaliveInterval int `toml:"conn_keepalive_interval"` - - TTLCheckInterval int `toml:"ttl_check_interval"` - - //tls config - TLS TLS `toml:"tls"` -} - -func NewConfigWithFile(fileName string) (*Config, error) { - data, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - - cfg, err := NewConfigWithData(data) - if err != nil { - return nil, err - } - - cfg.FileName = fileName - return cfg, nil -} - -func NewConfigWithData(data []byte) (*Config, error) { - cfg := NewConfigDefault() - - if err := toml.Unmarshal(data, cfg); err != nil { - 
return nil, fmt.Errorf("newConfigwithData: unmarashal: %s", err) - } - - cfg.adjust() - - return cfg, nil -} - -func NewConfigDefault() *Config { - cfg := new(Config) - - cfg.Addr = DefaultAddr - cfg.HttpAddr = "" - - cfg.DataDir = DefaultDataDir - - cfg.DBName = DefaultDBName - - cfg.SlaveOf = "" - cfg.Readonly = false - - // Disable Auth by default, by setting password to blank - cfg.AuthPassword = "" - - // default databases number - cfg.Databases = 16 - - // disable access log - cfg.AccessLog = "" - - cfg.LMDB.MapSize = 20 * MB - cfg.LMDB.NoSync = true - - cfg.UseReplication = false - cfg.Replication.WaitSyncTime = 500 - cfg.Replication.Compression = true - cfg.Replication.WaitMaxSlaveAcks = 2 - cfg.Replication.SyncLog = 0 - cfg.Replication.UseMmap = true - cfg.Snapshot.MaxNum = 1 - - cfg.RocksDB.EnableStatistics = false - cfg.RocksDB.UseFsync = false - cfg.RocksDB.DisableAutoCompactions = false - cfg.RocksDB.DisableWAL = false - - cfg.adjust() - - return cfg -} - -func getDefault(d int, s int) int { - if s <= 0 { - return d - } - - return s -} - -func (cfg *Config) adjust() { - cfg.LevelDB.adjust() - - cfg.RocksDB.adjust() - - cfg.Replication.ExpiredLogDays = getDefault(7, cfg.Replication.ExpiredLogDays) - cfg.Replication.MaxLogFileNum = getDefault(50, cfg.Replication.MaxLogFileNum) - cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize) - cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize) - cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval) - cfg.Databases = getDefault(16, cfg.Databases) -} - -func (cfg *LevelDBConfig) adjust() { - cfg.CacheSize = getDefault(4*MB, cfg.CacheSize) - cfg.BlockSize = getDefault(4*KB, cfg.BlockSize) - cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize) - cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles) - cfg.MaxFileSize = getDefault(32*MB, cfg.MaxFileSize) -} - -func (cfg *RocksDBConfig) adjust() { - cfg.CacheSize = getDefault(4*MB, cfg.CacheSize) - cfg.BlockSize = getDefault(4*KB, cfg.BlockSize) - cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize) - cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles) - cfg.MaxWriteBufferNum = getDefault(2, cfg.MaxWriteBufferNum) - cfg.MinWriteBufferNumberToMerge = getDefault(1, cfg.MinWriteBufferNumberToMerge) - cfg.NumLevels = getDefault(7, cfg.NumLevels) - cfg.Level0FileNumCompactionTrigger = getDefault(4, cfg.Level0FileNumCompactionTrigger) - cfg.Level0SlowdownWritesTrigger = getDefault(16, cfg.Level0SlowdownWritesTrigger) - cfg.Level0StopWritesTrigger = getDefault(64, cfg.Level0StopWritesTrigger) - cfg.TargetFileSizeBase = getDefault(32*MB, cfg.TargetFileSizeBase) - cfg.TargetFileSizeMultiplier = getDefault(1, cfg.TargetFileSizeMultiplier) - cfg.MaxBytesForLevelBase = getDefault(32*MB, cfg.MaxBytesForLevelBase) - cfg.MaxBytesForLevelMultiplier = getDefault(1, cfg.MaxBytesForLevelMultiplier) - cfg.MaxBackgroundCompactions = getDefault(1, cfg.MaxBackgroundCompactions) - cfg.MaxBackgroundFlushes = getDefault(1, cfg.MaxBackgroundFlushes) - cfg.StatsDumpPeriodSec = getDefault(3600, cfg.StatsDumpPeriodSec) - cfg.BackgroundThreads = getDefault(2, cfg.BackgroundThreads) - cfg.HighPriorityBackgroundThreads = getDefault(1, cfg.HighPriorityBackgroundThreads) - cfg.MaxManifestFileSize = getDefault(20*MB, cfg.MaxManifestFileSize) -} - -func (cfg *Config) Dump(w io.Writer) error { - data, err := toml.Marshal(*cfg) - if err != nil { - return err - } - if _, err := w.Write(data); err != nil { - return err - } - - return nil -} - -func (cfg *Config) DumpFile(fileName 
string) error { - var b bytes.Buffer - - if err := cfg.Dump(&b); err != nil { - return err - } - - return ioutil2.WriteFileAtomic(fileName, b.Bytes(), 0644) -} - -func (cfg *Config) Rewrite() error { - if len(cfg.FileName) == 0 { - return ErrNoConfigFile - } - - return cfg.DumpFile(cfg.FileName) -} - -func (cfg *Config) GetReadonly() bool { - cfg.m.RLock() - b := cfg.Readonly - cfg.m.RUnlock() - return b -} - -func (cfg *Config) SetReadonly(b bool) { - cfg.m.Lock() - cfg.Readonly = b - cfg.m.Unlock() -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/batch.go b/vendor/github.com/siddontang/ledisdb/ledis/batch.go deleted file mode 100644 index 6800dfe898f0..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/batch.go +++ /dev/null @@ -1,139 +0,0 @@ -package ledis - -import ( - "sync" - - "github.com/siddontang/go/log" - "github.com/siddontang/ledisdb/rpl" - "github.com/siddontang/ledisdb/store" -) - -type batch struct { - l *Ledis - - *store.WriteBatch - - sync.Locker - - // tx *Tx -} - -func (b *batch) Commit() error { - if b.l.cfg.GetReadonly() { - return ErrWriteInROnly - } - - return b.l.handleCommit(b.WriteBatch, b.WriteBatch) - - // if b.tx == nil { - // return b.l.handleCommit(b.WriteBatch, b.WriteBatch) - // } else { - // if b.l.r != nil { - // if err := b.tx.data.Append(b.WriteBatch.BatchData()); err != nil { - // return err - // } - // } - // return b.WriteBatch.Commit() - // } -} - -func (b *batch) Lock() { - b.Locker.Lock() -} - -func (b *batch) Unlock() { - b.WriteBatch.Rollback() - b.Locker.Unlock() -} - -func (b *batch) Put(key []byte, value []byte) { - b.WriteBatch.Put(key, value) -} - -func (b *batch) Delete(key []byte) { - b.WriteBatch.Delete(key) -} - -type dbBatchLocker struct { - l *sync.Mutex - wrLock *sync.RWMutex -} - -func (l *dbBatchLocker) Lock() { - l.wrLock.RLock() - l.l.Lock() -} - -func (l *dbBatchLocker) Unlock() { - l.l.Unlock() - l.wrLock.RUnlock() -} - -// type txBatchLocker struct { -// } - -// func (l *txBatchLocker) Lock() {} -// func (l *txBatchLocker) Unlock() {} - -// type multiBatchLocker struct { -// } - -// func (l *multiBatchLocker) Lock() {} -// func (l *multiBatchLocker) Unlock() {} - -func (l *Ledis) newBatch(wb *store.WriteBatch, locker sync.Locker) *batch { - b := new(batch) - b.l = l - b.WriteBatch = wb - - b.Locker = locker - - return b -} - -type commiter interface { - Commit() error -} - -type commitDataGetter interface { - Data() []byte -} - -func (l *Ledis) handleCommit(g commitDataGetter, c commiter) error { - l.commitLock.Lock() - - var err error - if l.r != nil { - var rl *rpl.Log - if rl, err = l.r.Log(g.Data()); err != nil { - l.commitLock.Unlock() - - log.Fatalf("write wal error %s", err.Error()) - return err - } - - l.propagate(rl) - - if err = c.Commit(); err != nil { - l.commitLock.Unlock() - - log.Fatalf("commit error %s", err.Error()) - l.noticeReplication() - return err - } - - if err = l.r.UpdateCommitID(rl.ID); err != nil { - l.commitLock.Unlock() - - log.Fatalf("update commit id error %s", err.Error()) - l.noticeReplication() - return err - } - } else { - err = c.Commit() - } - - l.commitLock.Unlock() - - return err -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/const.go b/vendor/github.com/siddontang/ledisdb/ledis/const.go deleted file mode 100644 index d35ca3b52232..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/const.go +++ /dev/null @@ -1,144 +0,0 @@ -package ledis - -import ( - "errors" -) - -const Version = "0.5" - -type DataType byte - -// for out use -const ( - KV 
DataType = iota - LIST - HASH - SET - ZSET -) - -func (d DataType) String() string { - switch d { - case KV: - return KVName - case LIST: - return ListName - case HASH: - return HashName - case SET: - return SetName - case ZSET: - return ZSetName - default: - return "unknown" - } -} - -const ( - KVName = "KV" - ListName = "LIST" - HashName = "HASH" - SetName = "SET" - ZSetName = "ZSET" -) - -// for backend store -const ( - NoneType byte = 0 - KVType byte = 1 - HashType byte = 2 - HSizeType byte = 3 - ListType byte = 4 - LMetaType byte = 5 - ZSetType byte = 6 - ZSizeType byte = 7 - ZScoreType byte = 8 - // BitType byte = 9 - // BitMetaType byte = 10 - SetType byte = 11 - SSizeType byte = 12 - - maxDataType byte = 100 - - /* - I make a big mistake about TTL time key format and have to use a new one (change 101 to 103). - You must run the ledis-upgrade-ttl to upgrade db. - */ - ObsoleteExpTimeType byte = 101 - ExpMetaType byte = 102 - ExpTimeType byte = 103 - - MetaType byte = 201 -) - -var ( - TypeName = map[byte]string{ - KVType: "kv", - HashType: "hash", - HSizeType: "hsize", - ListType: "list", - LMetaType: "lmeta", - ZSetType: "zset", - ZSizeType: "zsize", - ZScoreType: "zscore", - // BitType: "bit", - // BitMetaType: "bitmeta", - SetType: "set", - SSizeType: "ssize", - ExpTimeType: "exptime", - ExpMetaType: "expmeta", - } -) - -const ( - defaultScanCount int = 10 -) - -var ( - errKeySize = errors.New("invalid key size") - errValueSize = errors.New("invalid value size") - errHashFieldSize = errors.New("invalid hash field size") - errSetMemberSize = errors.New("invalid set member size") - errZSetMemberSize = errors.New("invalid zset member size") - errExpireValue = errors.New("invalid expire value") - errListIndex = errors.New("invalid list index") -) - -const ( - MaxDatabases int = 10240 - - //max key size - MaxKeySize int = 1024 - - //max hash field size - MaxHashFieldSize int = 1024 - - //max zset member size - MaxZSetMemberSize int = 1024 - - //max set member size - MaxSetMemberSize int = 1024 - - //max value size - MaxValueSize int = 1024 * 1024 * 1024 -) - -var ( - ErrScoreMiss = errors.New("zset score miss") - ErrWriteInROnly = errors.New("write not support in readonly mode") - ErrRplInRDWR = errors.New("replication not support in read write mode") - ErrRplNotSupport = errors.New("replication not support") -) - -// const ( -// DBAutoCommit uint8 = 0x0 -// DBInTransaction uint8 = 0x1 -// DBInMulti uint8 = 0x2 -// ) - -const ( - BitAND = "and" - BitOR = "or" - BitXOR = "xor" - BitNot = "not" -) diff --git a/vendor/github.com/siddontang/ledisdb/ledis/doc.go b/vendor/github.com/siddontang/ledisdb/ledis/doc.go deleted file mode 100644 index c6bfe7807bef..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/doc.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package ledis is a high performance embedded NoSQL. -// -// Ledis supports various data structure like kv, list, hash and zset like redis. -// -// Other features include replication, data with a limited time-to-live. -// -// Usage -// -// First create a ledis instance before use: -// -// l := ledis.Open(cfg) -// -// cfg is a Config instance which contains configuration for ledis use, -// like DataDir (root directory for ledis working to store data). -// -// After you create a ledis instance, you can select a DB to store you data: -// -// db, _ := l.Select(0) -// -// DB must be selected by a index, ledis supports only 16 databases, so the index range is [0-15]. 
-// -// KV -// -// KV is the most basic ledis type like any other key-value database. -// -// err := db.Set(key, value) -// value, err := db.Get(key) -// -// List -// -// List is simply lists of values, sorted by insertion order. -// You can push or pop value on the list head (left) or tail (right). -// -// err := db.LPush(key, value1) -// err := db.RPush(key, value2) -// value1, err := db.LPop(key) -// value2, err := db.RPop(key) -// -// Hash -// -// Hash is a map between fields and values. -// -// n, err := db.HSet(key, field1, value1) -// n, err := db.HSet(key, field2, value2) -// value1, err := db.HGet(key, field1) -// value2, err := db.HGet(key, field2) -// -// ZSet -// -// ZSet is a sorted collections of values. -// Every member of zset is associated with score, a int64 value which used to sort, from smallest to greatest score. -// Members are unique, but score may be same. -// -// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) -// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) -// -// -package ledis diff --git a/vendor/github.com/siddontang/ledisdb/ledis/dump.go b/vendor/github.com/siddontang/ledisdb/ledis/dump.go deleted file mode 100644 index 3e01ec2a5550..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/dump.go +++ /dev/null @@ -1,223 +0,0 @@ -package ledis - -import ( - "bufio" - "bytes" - "encoding/binary" - "io" - "os" - - "github.com/siddontang/go/snappy" - "github.com/siddontang/ledisdb/store" -) - -type DumpHead struct { - CommitID uint64 -} - -func (h *DumpHead) Read(r io.Reader) error { - if err := binary.Read(r, binary.BigEndian, &h.CommitID); err != nil { - return err - } - - return nil -} - -func (h *DumpHead) Write(w io.Writer) error { - if err := binary.Write(w, binary.BigEndian, h.CommitID); err != nil { - return err - } - - return nil -} - -func (l *Ledis) DumpFile(path string) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - return l.Dump(f) -} - -func (l *Ledis) Dump(w io.Writer) error { - var err error - - var commitID uint64 - var snap *store.Snapshot - - l.wLock.Lock() - - if l.r != nil { - if commitID, err = l.r.LastCommitID(); err != nil { - l.wLock.Unlock() - return err - } - } - - if snap, err = l.ldb.NewSnapshot(); err != nil { - l.wLock.Unlock() - return err - } - defer snap.Close() - - l.wLock.Unlock() - - wb := bufio.NewWriterSize(w, 4096) - - h := &DumpHead{commitID} - - if err = h.Write(wb); err != nil { - return err - } - - it := snap.NewIterator() - defer it.Close() - it.SeekToFirst() - - compressBuf := make([]byte, 4096) - - var key []byte - var value []byte - for ; it.Valid(); it.Next() { - key = it.RawKey() - value = it.RawValue() - - if key, err = snappy.Encode(compressBuf, key); err != nil { - return err - } - - if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil { - return err - } - - if _, err = wb.Write(key); err != nil { - return err - } - - if value, err = snappy.Encode(compressBuf, value); err != nil { - return err - } - - if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil { - return err - } - - if _, err = wb.Write(value); err != nil { - return err - } - } - - if err = wb.Flush(); err != nil { - return err - } - - compressBuf = nil - - return nil -} - -// clear all data and load dump file to db -func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - return l.LoadDump(f) -} - -// clear all data 
and load dump file to db -func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) { - l.wLock.Lock() - defer l.wLock.Unlock() - - var err error - if err = l.flushAll(); err != nil { - return nil, err - } - - rb := bufio.NewReaderSize(r, 4096) - - h := new(DumpHead) - - if err = h.Read(rb); err != nil { - return nil, err - } - - var keyLen uint16 - var valueLen uint32 - - var keyBuf bytes.Buffer - var valueBuf bytes.Buffer - - deKeyBuf := make([]byte, 4096) - deValueBuf := make([]byte, 4096) - - var key, value []byte - - wb := l.ldb.NewWriteBatch() - defer wb.Close() - - n := 0 - - for { - if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF { - return nil, err - } else if err == io.EOF { - break - } - - if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil { - return nil, err - } - - if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil { - return nil, err - } - - if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil { - return nil, err - } - - if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil { - return nil, err - } - - if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil { - return nil, err - } - - wb.Put(key, value) - n++ - if n%1024 == 0 { - if err = wb.Commit(); err != nil { - return nil, err - } - } - - // if err = l.ldb.Put(key, value); err != nil { - // return nil, err - // } - - keyBuf.Reset() - valueBuf.Reset() - } - - if err = wb.Commit(); err != nil { - return nil, err - } - - deKeyBuf = nil - deValueBuf = nil - - if l.r != nil { - if err := l.r.UpdateCommitID(h.CommitID); err != nil { - return nil, err - } - } - - return h, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/event.go b/vendor/github.com/siddontang/ledisdb/ledis/event.go deleted file mode 100644 index d14309def4fe..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/event.go +++ /dev/null @@ -1,126 +0,0 @@ -package ledis - -import ( - "errors" - "fmt" - "strconv" - - "github.com/siddontang/go/hack" -) - -var errInvalidEvent = errors.New("invalid event") - -func formatEventKey(buf []byte, k []byte) ([]byte, error) { - if len(k) < 2 { - return nil, errInvalidEvent - } - - buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...) - buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...) 
- - db := new(DB) - index, _, err := decodeDBIndex(k) - if err != nil { - return nil, err - } - db.setIndex(index) - - //to do format at respective place - - switch k[1] { - case KVType: - if key, err := db.decodeKVKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } - case HashType: - if key, field, err := db.hDecodeHashKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(field)) - } - case HSizeType: - if key, err := db.hDecodeSizeKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } - case ListType: - if key, seq, err := db.lDecodeListKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, int64(seq), 10) - } - case LMetaType: - if key, err := db.lDecodeMetaKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } - case ZSetType: - if key, m, err := db.zDecodeSetKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(m)) - } - case ZSizeType: - if key, err := db.zDecodeSizeKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } - case ZScoreType: - if key, m, score, err := db.zDecodeScoreKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(m)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, score, 10) - } - case SetType: - if key, member, err := db.sDecodeSetKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(member)) - } - case SSizeType: - if key, err := db.sDecodeSizeKey(k); err != nil { - return nil, err - } else { - buf = strconv.AppendQuote(buf, hack.String(key)) - } - case ExpTimeType: - if tp, key, t, err := db.expDecodeTimeKey(k); err != nil { - return nil, err - } else { - buf = append(buf, TypeName[tp]...) - buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(key)) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, t, 10) - } - case ExpMetaType: - if tp, key, err := db.expDecodeMetaKey(k); err != nil { - return nil, err - } else { - buf = append(buf, TypeName[tp]...) 
- buf = append(buf, ' ') - buf = strconv.AppendQuote(buf, hack.String(key)) - } - default: - return nil, errInvalidEvent - } - - return buf, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/ledis.go b/vendor/github.com/siddontang/ledisdb/ledis/ledis.go deleted file mode 100644 index 8d654ba4c891..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/ledis.go +++ /dev/null @@ -1,241 +0,0 @@ -package ledis - -import ( - "fmt" - "io" - "os" - "path" - "sync" - "time" - - "github.com/siddontang/go/filelock" - "github.com/siddontang/go/log" - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/rpl" - "github.com/siddontang/ledisdb/store" -) - -type Ledis struct { - cfg *config.Config - - ldb *store.DB - - dbLock sync.Mutex - dbs map[int]*DB - - quit chan struct{} - wg sync.WaitGroup - - //for replication - r *rpl.Replication - rc chan struct{} - rbatch *store.WriteBatch - rDoneCh chan struct{} - rhs []NewLogEventHandler - - wLock sync.RWMutex //allow one write at same time - commitLock sync.Mutex //allow one write commit at same time - - lock io.Closer - - ttlCheckers []*ttlChecker - ttlCheckerCh chan *ttlChecker -} - -func Open(cfg *config.Config) (*Ledis, error) { - if len(cfg.DataDir) == 0 { - cfg.DataDir = config.DefaultDataDir - } - - if cfg.Databases == 0 { - cfg.Databases = 16 - } else if cfg.Databases > MaxDatabases { - cfg.Databases = MaxDatabases - } - - os.MkdirAll(cfg.DataDir, 0755) - - var err error - - l := new(Ledis) - l.cfg = cfg - - if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil { - return nil, err - } - - l.quit = make(chan struct{}) - - if l.ldb, err = store.Open(cfg); err != nil { - return nil, err - } - - if cfg.UseReplication { - if l.r, err = rpl.NewReplication(cfg); err != nil { - return nil, err - } - - l.rc = make(chan struct{}, 1) - l.rbatch = l.ldb.NewWriteBatch() - l.rDoneCh = make(chan struct{}, 1) - - l.wg.Add(1) - go l.onReplication() - - //first we must try wait all replication ok - //maybe some logs are not committed - l.WaitReplication() - } else { - l.r = nil - } - - l.dbs = make(map[int]*DB, 16) - - l.checkTTL() - - return l, nil -} - -func (l *Ledis) Close() { - close(l.quit) - l.wg.Wait() - - l.ldb.Close() - - if l.r != nil { - l.r.Close() - //l.r = nil - } - - if l.lock != nil { - l.lock.Close() - //l.lock = nil - } -} - -func (l *Ledis) Select(index int) (*DB, error) { - if index < 0 || index >= l.cfg.Databases { - return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1) - } - - l.dbLock.Lock() - defer l.dbLock.Unlock() - - db, ok := l.dbs[index] - if ok { - return db, nil - } - - db = l.newDB(index) - l.dbs[index] = db - - go func(db *DB) { - l.ttlCheckerCh <- db.ttlChecker - }(db) - - return db, nil -} - -// Flush All will clear all data and replication logs -func (l *Ledis) FlushAll() error { - l.wLock.Lock() - defer l.wLock.Unlock() - - return l.flushAll() -} - -func (l *Ledis) flushAll() error { - it := l.ldb.NewIterator() - defer it.Close() - - it.SeekToFirst() - - w := l.ldb.NewWriteBatch() - defer w.Rollback() - - n := 0 - for ; it.Valid(); it.Next() { - n++ - if n == 10000 { - if err := w.Commit(); err != nil { - log.Fatalf("flush all commit error: %s", err.Error()) - return err - } - n = 0 - } - w.Delete(it.RawKey()) - } - - if err := w.Commit(); err != nil { - log.Fatalf("flush all commit error: %s", err.Error()) - return err - } - - if l.r != nil { - if err := l.r.Clear(); err != nil { - log.Fatalf("flush all replication clear error: %s", 
err.Error()) - return err - } - } - - return nil -} - -func (l *Ledis) IsReadOnly() bool { - if l.cfg.GetReadonly() { - return true - } else if l.r != nil { - if b, _ := l.r.CommitIDBehind(); b { - return true - } - } - return false -} - -func (l *Ledis) checkTTL() { - l.ttlCheckers = make([]*ttlChecker, 0, 16) - l.ttlCheckerCh = make(chan *ttlChecker, 16) - - if l.cfg.TTLCheckInterval == 0 { - l.cfg.TTLCheckInterval = 1 - } - - l.wg.Add(1) - go func() { - defer l.wg.Done() - - tick := time.NewTicker(time.Duration(l.cfg.TTLCheckInterval) * time.Second) - defer tick.Stop() - - for { - select { - case <-tick.C: - if l.IsReadOnly() { - break - } - - for _, c := range l.ttlCheckers { - c.check() - } - case c := <-l.ttlCheckerCh: - l.ttlCheckers = append(l.ttlCheckers, c) - c.check() - case <-l.quit: - return - } - } - - }() - -} - -func (l *Ledis) StoreStat() *store.Stat { - return l.ldb.Stat() -} - -func (l *Ledis) CompactStore() error { - l.wLock.Lock() - defer l.wLock.Unlock() - - return l.ldb.Compact() -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go b/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go deleted file mode 100644 index 7b3ff0f7d3f4..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go +++ /dev/null @@ -1,204 +0,0 @@ -package ledis - -import ( - "bytes" - "encoding/binary" - "fmt" - "sync" - - "github.com/siddontang/ledisdb/store" -) - -type ibucket interface { - Get(key []byte) ([]byte, error) - GetSlice(key []byte) (store.Slice, error) - - Put(key []byte, value []byte) error - Delete(key []byte) error - - NewIterator() *store.Iterator - - NewWriteBatch() *store.WriteBatch - - RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator - RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator - RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator - RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator -} - -type DB struct { - l *Ledis - - sdb *store.DB - - bucket ibucket - - index int - - // buffer to store index varint - indexVarBuf []byte - - kvBatch *batch - listBatch *batch - hashBatch *batch - zsetBatch *batch - // binBatch *batch - setBatch *batch - - // status uint8 - - ttlChecker *ttlChecker - - lbkeys *lBlockKeys -} - -func (l *Ledis) newDB(index int) *DB { - d := new(DB) - - d.l = l - - d.sdb = l.ldb - - d.bucket = d.sdb - - // d.status = DBAutoCommit - d.setIndex(index) - - d.kvBatch = d.newBatch() - d.listBatch = d.newBatch() - d.hashBatch = d.newBatch() - d.zsetBatch = d.newBatch() - // d.binBatch = d.newBatch() - d.setBatch = d.newBatch() - - d.lbkeys = newLBlockKeys() - - d.ttlChecker = d.newTTLChecker() - - return d -} - -func decodeDBIndex(buf []byte) (int, int, error) { - index, n := binary.Uvarint(buf) - if n == 0 { - return 0, 0, fmt.Errorf("buf is too small to save index") - } else if n < 0 { - return 0, 0, fmt.Errorf("value larger than 64 bits") - } else if index > uint64(MaxDatabases) { - return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases) - } - return int(index), n, nil -} - -func (db *DB) setIndex(index int) { - db.index = index - // the most size for varint is 10 bytes - buf := make([]byte, 10) - n := binary.PutUvarint(buf, uint64(index)) - - db.indexVarBuf = buf[0:n] -} - -func (db *DB) checkKeyIndex(buf []byte) (int, error) { - if len(buf) < len(db.indexVarBuf) { - return 0, fmt.Errorf("key is too small") 
- } else if !bytes.Equal(db.indexVarBuf, buf[0:len(db.indexVarBuf)]) { - return 0, fmt.Errorf("invalid db index") - } - - return len(db.indexVarBuf), nil -} - -func (db *DB) newTTLChecker() *ttlChecker { - c := new(ttlChecker) - c.db = db - c.txs = make([]*batch, maxDataType) - c.cbs = make([]onExpired, maxDataType) - c.nc = 0 - - c.register(KVType, db.kvBatch, db.delete) - c.register(ListType, db.listBatch, db.lDelete) - c.register(HashType, db.hashBatch, db.hDelete) - c.register(ZSetType, db.zsetBatch, db.zDelete) - // c.register(BitType, db.binBatch, db.bDelete) - c.register(SetType, db.setBatch, db.sDelete) - - return c -} - -func (db *DB) newBatch() *batch { - return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock}) -} - -func (db *DB) Index() int { - return int(db.index) -} - -// func (db *DB) IsAutoCommit() bool { -// return db.status == DBAutoCommit -// } - -func (db *DB) FlushAll() (drop int64, err error) { - all := [...](func() (int64, error)){ - db.flush, - db.lFlush, - db.hFlush, - db.zFlush, - db.sFlush} - - for _, flush := range all { - if n, e := flush(); e != nil { - err = e - return - } else { - drop += n - } - } - - return -} - -func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { - var deleteFunc func(t *batch, key []byte) int64 - var metaDataType byte - switch dataType { - case KVType: - deleteFunc = db.delete - metaDataType = KVType - case ListType: - deleteFunc = db.lDelete - metaDataType = LMetaType - case HashType: - deleteFunc = db.hDelete - metaDataType = HSizeType - case ZSetType: - deleteFunc = db.zDelete - metaDataType = ZSizeType - // case BitType: - // deleteFunc = db.bDelete - // metaDataType = BitMetaType - case SetType: - deleteFunc = db.sDelete - metaDataType = SSizeType - default: - return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType]) - } - - var keys [][]byte - keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) - for len(keys) != 0 || err != nil { - for _, key := range keys { - deleteFunc(t, key) - db.rmExpire(t, dataType, key) - - } - - if err = t.Commit(); err != nil { - return - } else { - drop += int64(len(keys)) - } - keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false) - } - return -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/migrate.go b/vendor/github.com/siddontang/ledisdb/ledis/migrate.go deleted file mode 100644 index aca8a86c5ed0..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/migrate.go +++ /dev/null @@ -1,189 +0,0 @@ -package ledis - -import ( - "fmt" - - "github.com/siddontang/rdb" -) - -/* - To support redis <-> ledisdb, the dump value format is the same as redis. - We will not support bitmap, and may add bit operations for kv later. - - But you must know that we use int64 for zset score, not double. - Only support rdb version 6. 
-*/ - -func (db *DB) Dump(key []byte) ([]byte, error) { - v, err := db.Get(key) - if err != nil { - return nil, err - } else if v == nil { - return nil, err - } - - return rdb.Dump(rdb.String(v)) -} - -func (db *DB) LDump(key []byte) ([]byte, error) { - v, err := db.LRange(key, 0, -1) - if err != nil { - return nil, err - } else if len(v) == 0 { - return nil, err - } - - return rdb.Dump(rdb.List(v)) -} - -func (db *DB) HDump(key []byte) ([]byte, error) { - v, err := db.HGetAll(key) - if err != nil { - return nil, err - } else if len(v) == 0 { - return nil, err - } - - o := make(rdb.Hash, len(v)) - for i := 0; i < len(v); i++ { - o[i].Field = v[i].Field - o[i].Value = v[i].Value - } - - return rdb.Dump(o) -} - -func (db *DB) SDump(key []byte) ([]byte, error) { - v, err := db.SMembers(key) - if err != nil { - return nil, err - } else if len(v) == 0 { - return nil, err - } - - return rdb.Dump(rdb.Set(v)) -} - -func (db *DB) ZDump(key []byte) ([]byte, error) { - v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1) - if err != nil { - return nil, err - } else if len(v) == 0 { - return nil, err - } - - o := make(rdb.ZSet, len(v)) - for i := 0; i < len(v); i++ { - o[i].Member = v[i].Member - o[i].Score = float64(v[i].Score) - } - - return rdb.Dump(o) -} - -func (db *DB) Restore(key []byte, ttl int64, data []byte) error { - d, err := rdb.DecodeDump(data) - if err != nil { - return err - } - - //ttl is milliseconds, but we only support seconds - //later may support milliseconds - if ttl > 0 { - ttl = ttl / 1e3 - if ttl == 0 { - ttl = 1 - } - } - - switch value := d.(type) { - case rdb.String: - if _, err = db.Del(key); err != nil { - return err - } - - if err = db.Set(key, value); err != nil { - return err - } - - if ttl > 0 { - if _, err = db.Expire(key, ttl); err != nil { - return err - } - } - case rdb.Hash: - //first clear old key - if _, err = db.HClear(key); err != nil { - return err - } - - fv := make([]FVPair, len(value)) - for i := 0; i < len(value); i++ { - fv[i] = FVPair{Field: value[i].Field, Value: value[i].Value} - } - - if err = db.HMset(key, fv...); err != nil { - return err - } - - if ttl > 0 { - if _, err = db.HExpire(key, ttl); err != nil { - return err - } - } - case rdb.List: - //first clear old key - if _, err = db.LClear(key); err != nil { - return err - } - - if _, err = db.RPush(key, value...); err != nil { - return err - } - - if ttl > 0 { - if _, err = db.LExpire(key, ttl); err != nil { - return err - } - } - case rdb.ZSet: - //first clear old key - if _, err = db.ZClear(key); err != nil { - return err - } - - sp := make([]ScorePair, len(value)) - for i := 0; i < len(value); i++ { - sp[i] = ScorePair{int64(value[i].Score), value[i].Member} - } - - if _, err = db.ZAdd(key, sp...); err != nil { - return err - } - - if ttl > 0 { - if _, err = db.ZExpire(key, ttl); err != nil { - return err - } - } - case rdb.Set: - //first clear old key - if _, err = db.SClear(key); err != nil { - return err - } - - if _, err = db.SAdd(key, value...); err != nil { - return err - } - - if ttl > 0 { - if _, err = db.SExpire(key, ttl); err != nil { - return err - } - } - default: - return fmt.Errorf("invalid data type %T", d) - } - - return nil -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/replication.go b/vendor/github.com/siddontang/ledisdb/ledis/replication.go deleted file mode 100644 index 20c20994b78a..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/replication.go +++ /dev/null @@ -1,250 +0,0 @@ -package ledis - -import ( - "bytes" - "errors" - "io" - 
"time" - - "github.com/siddontang/go/log" - "github.com/siddontang/go/snappy" - "github.com/siddontang/ledisdb/rpl" - "github.com/siddontang/ledisdb/store" -) - -const ( - maxReplLogSize = 1 * 1024 * 1024 -) - -var ( - ErrLogMissed = errors.New("log is pured in server") -) - -func (l *Ledis) ReplicationUsed() bool { - return l.r != nil -} - -func (l *Ledis) handleReplication() error { - l.wLock.Lock() - defer l.wLock.Unlock() - - defer AsyncNotify(l.rDoneCh) - - rl := &rpl.Log{} - - var err error - for { - if err = l.r.NextNeedCommitLog(rl); err != nil { - if err != rpl.ErrNoBehindLog { - log.Errorf("get next commit log err, %s", err.Error) - return err - } else { - return nil - } - } else { - l.rbatch.Rollback() - - if rl.Compression == 1 { - //todo optimize - if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil { - log.Errorf("decode log error %s", err.Error()) - return err - } - } - - if bd, err := store.NewBatchData(rl.Data); err != nil { - log.Errorf("decode batch log error %s", err.Error()) - return err - } else if err = bd.Replay(l.rbatch); err != nil { - log.Errorf("replay batch log error %s", err.Error()) - } - - l.commitLock.Lock() - if err = l.rbatch.Commit(); err != nil { - log.Errorf("commit log error %s", err.Error()) - } else if err = l.r.UpdateCommitID(rl.ID); err != nil { - log.Errorf("update commit id error %s", err.Error()) - } - - l.commitLock.Unlock() - if err != nil { - return err - } - } - - } -} - -func (l *Ledis) onReplication() { - defer l.wg.Done() - - l.noticeReplication() - - for { - select { - case <-l.rc: - l.handleReplication() - case <-l.quit: - return - } - } -} - -func (l *Ledis) WaitReplication() error { - if !l.ReplicationUsed() { - return ErrRplNotSupport - - } - - for i := 0; i < 100; i++ { - l.noticeReplication() - - select { - case <-l.rDoneCh: - case <-l.quit: - return nil - } - time.Sleep(100 * time.Millisecond) - - b, err := l.r.CommitIDBehind() - if err != nil { - return err - } else if !b { - return nil - } - } - - return errors.New("wait replication too many times") -} - -func (l *Ledis) StoreLogsFromReader(rb io.Reader) error { - if !l.ReplicationUsed() { - return ErrRplNotSupport - } else if !l.cfg.Readonly { - return ErrRplInRDWR - } - - log := &rpl.Log{} - - for { - if err := log.Decode(rb); err != nil { - if err == io.EOF { - break - } else { - return err - } - } - - if err := l.r.StoreLog(log); err != nil { - return err - } - - } - - l.noticeReplication() - - return nil -} - -func (l *Ledis) noticeReplication() { - AsyncNotify(l.rc) -} - -func (l *Ledis) StoreLogsFromData(data []byte) error { - rb := bytes.NewReader(data) - - return l.StoreLogsFromReader(rb) -} - -func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) { - if !l.ReplicationUsed() { - // no replication log - nextLogID = 0 - err = ErrRplNotSupport - return - } - - var firtID, lastID uint64 - - firtID, err = l.r.FirstLogID() - if err != nil { - return - } - - if startLogID < firtID { - err = ErrLogMissed - return - } - - lastID, err = l.r.LastLogID() - if err != nil { - return - } - - nextLogID = startLogID - - log := &rpl.Log{} - for i := startLogID; i <= lastID; i++ { - if err = l.r.GetLog(i, log); err != nil { - return - } - - if err = log.Encode(w); err != nil { - return - } - - nextLogID = i + 1 - - n += log.Size() - - if n > maxReplLogSize { - break - } - } - - return -} - -// try to read events, if no events read, try to wait the new event singal until timeout seconds -func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w 
io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) { - n, nextLogID, err = l.ReadLogsTo(startLogID, w) - if err != nil { - return - } else if n != 0 { - return - } - //no events read - select { - case <-l.r.WaitLog(): - case <-time.After(time.Duration(timeout) * time.Second): - case <-quitCh: - return - } - return l.ReadLogsTo(startLogID, w) -} - -func (l *Ledis) propagate(rl *rpl.Log) { - for _, h := range l.rhs { - h(rl) - } -} - -type NewLogEventHandler func(rl *rpl.Log) - -func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error { - if !l.ReplicationUsed() { - return ErrRplNotSupport - } - - l.rhs = append(l.rhs, h) - - return nil -} - -func (l *Ledis) ReplicationStat() (*rpl.Stat, error) { - if !l.ReplicationUsed() { - return nil, ErrRplNotSupport - } - - return l.r.Stat() -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/scan.go b/vendor/github.com/siddontang/ledisdb/ledis/scan.go deleted file mode 100644 index c4540a6105ed..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/scan.go +++ /dev/null @@ -1,396 +0,0 @@ -package ledis - -import ( - "errors" - "regexp" - - "github.com/siddontang/ledisdb/store" -) - -var errDataType = errors.New("error data type") -var errMetaKey = errors.New("error meta key") - -//if inclusive is true, scan range [cursor, inf) else (cursor, inf) -func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { - storeDataType, err := getDataStoreType(dataType) - if err != nil { - return nil, err - } - - return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false) -} - -//if inclusive is true, revscan range (-inf, cursor] else (-inf, cursor) -func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { - storeDataType, err := getDataStoreType(dataType) - if err != nil { - return nil, err - } - - return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true) -} - -func getDataStoreType(dataType DataType) (byte, error) { - var storeDataType byte - switch dataType { - case KV: - storeDataType = KVType - case LIST: - storeDataType = LMetaType - case HASH: - storeDataType = HSizeType - case SET: - storeDataType = SSizeType - case ZSET: - storeDataType = ZSizeType - default: - return 0, errDataType - } - return storeDataType, nil -} - -func buildMatchRegexp(match string) (*regexp.Regexp, error) { - var err error - var r *regexp.Regexp = nil - - if len(match) > 0 { - if r, err = regexp.Compile(match); err != nil { - return nil, err - } - } - - return r, nil -} - -func (db *DB) buildScanIterator(minKey []byte, maxKey []byte, inclusive bool, reverse bool) *store.RangeLimitIterator { - tp := store.RangeOpen - - if !reverse { - if inclusive { - tp = store.RangeROpen - } - } else { - if inclusive { - tp = store.RangeLOpen - } - } - - var it *store.RangeLimitIterator - if !reverse { - it = db.bucket.RangeIterator(minKey, maxKey, tp) - } else { - it = db.bucket.RevRangeIterator(minKey, maxKey, tp) - } - - return it -} - -func (db *DB) buildScanKeyRange(storeDataType byte, key []byte, reverse bool) (minKey []byte, maxKey []byte, err error) { - if !reverse { - if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil { - return - } - if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil { - return - } - } else { - if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil { - return - } - if maxKey, err = db.encodeScanMaxKey(storeDataType, key);
err != nil { - return - } - } - return -} - -func checkScanCount(count int) int { - if count <= 0 { - count = defaultScanCount - } - - return count -} - -func (db *DB) scanGeneric(storeDataType byte, key []byte, count int, - inclusive bool, match string, reverse bool) ([][]byte, error) { - - r, err := buildMatchRegexp(match) - if err != nil { - return nil, err - } - - minKey, maxKey, err := db.buildScanKeyRange(storeDataType, key, reverse) - if err != nil { - return nil, err - } - - count = checkScanCount(count) - - it := db.buildScanIterator(minKey, maxKey, inclusive, reverse) - - v := make([][]byte, 0, count) - - for i := 0; it.Valid() && i < count; it.Next() { - if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil { - continue - } else if r != nil && !r.Match(k) { - continue - } else { - v = append(v, k) - i++ - } - } - it.Close() - return v, nil -} - -func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) { - return db.encodeScanKey(storeDataType, key) -} - -func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) { - if len(key) > 0 { - return db.encodeScanKey(storeDataType, key) - } - - k, err := db.encodeScanKey(storeDataType, nil) - if err != nil { - return nil, err - } - k[len(k)-1] = storeDataType + 1 - return k, nil -} - -func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) { - switch storeDataType { - case KVType: - return db.encodeKVKey(key), nil - case LMetaType: - return db.lEncodeMetaKey(key), nil - case HSizeType: - return db.hEncodeSizeKey(key), nil - case ZSizeType: - return db.zEncodeSizeKey(key), nil - case SSizeType: - return db.sEncodeSizeKey(key), nil - default: - return nil, errDataType - } -} - -func (db *DB) decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) { - switch storeDataType { - case KVType: - key, err = db.decodeKVKey(ek) - case LMetaType: - key, err = db.lDecodeMetaKey(ek) - case HSizeType: - key, err = db.hDecodeSizeKey(ek) - case ZSizeType: - key, err = db.zDecodeSizeKey(ek) - case SSizeType: - key, err = db.sDecodeSizeKey(ek) - default: - err = errDataType - } - return -} - -// for specail data scan - -func (db *DB) buildDataScanKeyRange(storeDataType byte, key []byte, cursor []byte, reverse bool) (minKey []byte, maxKey []byte, err error) { - if !reverse { - if minKey, err = db.encodeDataScanMinKey(storeDataType, key, cursor); err != nil { - return - } - if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, nil); err != nil { - return - } - } else { - if minKey, err = db.encodeDataScanMinKey(storeDataType, key, nil); err != nil { - return - } - if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, cursor); err != nil { - return - } - } - return -} - -func (db *DB) encodeDataScanMinKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { - return db.encodeDataScanKey(storeDataType, key, cursor) -} - -func (db *DB) encodeDataScanMaxKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { - if len(cursor) > 0 { - return db.encodeDataScanKey(storeDataType, key, cursor) - } - - k, err := db.encodeDataScanKey(storeDataType, key, nil) - if err != nil { - return nil, err - } - - // here, the last byte is the start seperator, set it to stop seperator - k[len(k)-1] = k[len(k)-1] + 1 - return k, nil -} - -func (db *DB) encodeDataScanKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) { - switch storeDataType { - case HashType: - return db.hEncodeHashKey(key, cursor), nil - case ZSetType: - return 
db.zEncodeSetKey(key, cursor), nil - case SetType: - return db.sEncodeSetKey(key, cursor), nil - default: - return nil, errDataType - } -} - -func (db *DB) buildDataScanIterator(storeDataType byte, key []byte, cursor []byte, count int, - inclusive bool, reverse bool) (*store.RangeLimitIterator, error) { - - if err := checkKeySize(key); err != nil { - return nil, err - } - - minKey, maxKey, err := db.buildDataScanKeyRange(storeDataType, key, cursor, reverse) - if err != nil { - return nil, err - } - - it := db.buildScanIterator(minKey, maxKey, inclusive, reverse) - - return it, nil -} - -func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]FVPair, error) { - count = checkScanCount(count) - - r, err := buildMatchRegexp(match) - if err != nil { - return nil, err - } - - v := make([]FVPair, 0, count) - - it, err := db.buildDataScanIterator(HashType, key, cursor, count, inclusive, reverse) - if err != nil { - return nil, err - } - - defer it.Close() - - for i := 0; it.Valid() && i < count; it.Next() { - _, f, err := db.hDecodeHashKey(it.Key()) - if err != nil { - return nil, err - } else if r != nil && !r.Match(f) { - continue - } - - v = append(v, FVPair{Field: f, Value: it.Value()}) - - i++ - } - - return v, nil -} - -func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { - return db.hScanGeneric(key, cursor, count, inclusive, match, false) -} - -func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) { - return db.hScanGeneric(key, cursor, count, inclusive, match, true) -} - -func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([][]byte, error) { - count = checkScanCount(count) - - r, err := buildMatchRegexp(match) - if err != nil { - return nil, err - } - - v := make([][]byte, 0, count) - - it, err := db.buildDataScanIterator(SetType, key, cursor, count, inclusive, reverse) - if err != nil { - return nil, err - } - - defer it.Close() - - for i := 0; it.Valid() && i < count; it.Next() { - _, m, err := db.sDecodeSetKey(it.Key()) - if err != nil { - return nil, err - } else if r != nil && !r.Match(m) { - continue - } - - v = append(v, m) - - i++ - } - - return v, nil -} - -func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { - return db.sScanGeneric(key, cursor, count, inclusive, match, false) -} - -func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) { - return db.sScanGeneric(key, cursor, count, inclusive, match, true) -} - -func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]ScorePair, error) { - count = checkScanCount(count) - - r, err := buildMatchRegexp(match) - if err != nil { - return nil, err - } - - v := make([]ScorePair, 0, count) - - it, err := db.buildDataScanIterator(ZSetType, key, cursor, count, inclusive, reverse) - if err != nil { - return nil, err - } - - defer it.Close() - - for i := 0; it.Valid() && i < count; it.Next() { - _, m, err := db.zDecodeSetKey(it.Key()) - if err != nil { - return nil, err - } else if r != nil && !r.Match(m) { - continue - } - - score, err := Int64(it.Value(), nil) - if err != nil { - return nil, err - } - - v = append(v, ScorePair{Score: score, Member: m}) - - i++ - } - - return v, nil -} - -func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive 
bool, match string) ([]ScorePair, error) { - return db.zScanGeneric(key, cursor, count, inclusive, match, false) -} - -func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) { - return db.zScanGeneric(key, cursor, count, inclusive, match, true) -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/sort.go b/vendor/github.com/siddontang/ledisdb/ledis/sort.go deleted file mode 100644 index 6a54c075b822..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/sort.go +++ /dev/null @@ -1,233 +0,0 @@ -package ledis - -import ( - "bytes" - "fmt" - "sort" - "strconv" - - "github.com/siddontang/ledisdb/store" -) - -type Limit struct { - Offset int - Size int -} - -func getSortRange(values [][]byte, offset int, size int) (int, int) { - var start = 0 - if offset > 0 { - start = offset - } - - valueLen := len(values) - var end = valueLen - 1 - if size > 0 { - end = start + size - 1 - } - - if start >= valueLen { - start = valueLen - 1 - end = valueLen - 2 - } - - if end >= valueLen { - end = valueLen - 1 - } - - return start, end -} - -var hashPattern = []byte("*->") - -func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte { - // If the pattern is #, return the substitution key itself - if bytes.Equal(pattern, []byte{'#'}) { - return subKey - } - - // If we can't find '*' in the pattern, return nil - if !bytes.Contains(pattern, []byte{'*'}) { - return nil - } - - key := pattern - var field []byte = nil - - // Find out if we're dealing with a hash dereference - if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) { - key = pattern[0 : n+1] - field = pattern[n+3:] - } - - // Perform the '*' substitution - key = bytes.Replace(key, []byte{'*'}, subKey, 1) - - var value []byte - if field == nil { - value, _ = db.Get(key) - } else { - value, _ = db.HGet(key, field) - } - - return value -} - -type sortItem struct { - value []byte - cmpValue []byte - score float64 -} - -type sortItemSlice struct { - alpha bool - sortByPattern bool - items []sortItem -} - -func (s *sortItemSlice) Len() int { - return len(s.items) -} - -func (s *sortItemSlice) Swap(i, j int) { - s.items[i], s.items[j] = s.items[j], s.items[i] -} - -func (s *sortItemSlice) Less(i, j int) bool { - s1 := s.items[i] - s2 := s.items[j] - if !s.alpha { - if s1.score < s2.score { - return true - } else if s1.score > s2.score { - return false - } else { - return bytes.Compare(s1.value, s2.value) < 0 - } - } else { - if s.sortByPattern { - if s1.cmpValue == nil || s2.cmpValue == nil { - if s1.cmpValue == nil { - return true - } else { - return false - } - } else { - // Unlike redis, we only use bytes compare - return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0 - } - } else { - // Unlike redis, we only use bytes compare - return bytes.Compare(s1.value, s2.value) < 0 - } - } -} - -func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { - if len(values) == 0 { - return [][]byte{}, nil - } - - start, end := getSortRange(values, offset, size) - - dontsort := 0 - - if sortBy != nil { - if !bytes.Contains(sortBy, []byte{'*'}) { - dontsort = 1 - } - } - - items := &sortItemSlice{ - alpha: alpha, - sortByPattern: sortBy != nil, - items: make([]sortItem, len(values)), - } - - for i, value := range values { - items.items[i].value = value - items.items[i].score = 0 - items.items[i].cmpValue = nil - - if dontsort == 0 { - var cmpValue []byte - if sortBy != nil { - cmpValue = 
db.lookupKeyByPattern(sortBy, value) - } else { - // use value iteself to sort by - cmpValue = value - } - - if cmpValue == nil { - continue - } - - if alpha { - if sortBy != nil { - items.items[i].cmpValue = cmpValue - } - } else { - score, err := strconv.ParseFloat(string(cmpValue), 64) - if err != nil { - return nil, fmt.Errorf("%s scores can't be converted into double", cmpValue) - } - items.items[i].score = score - } - } - } - - if dontsort == 0 { - if !desc { - sort.Sort(items) - } else { - sort.Sort(sort.Reverse(items)) - } - } - - var resLen int = end - start + 1 - if len(sortGet) > 0 { - resLen = len(sortGet) * (end - start + 1) - } - - res := make([][]byte, 0, resLen) - for i := start; i <= end; i++ { - if len(sortGet) == 0 { - res = append(res, items.items[i].value) - } else { - for _, getPattern := range sortGet { - v := db.lookupKeyByPattern(getPattern, items.items[i].value) - res = append(res, v) - } - } - } - - return res, nil -} - -func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { - values, err := db.LRange(key, 0, -1) - - if err != nil { - return nil, err - } - - return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) -} - -func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { - values, err := db.SMembers(key) - if err != nil { - return nil, err - } - - return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) -} - -func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) { - values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1) - if err != nil { - return nil, err - } - - return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet) -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go b/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go deleted file mode 100644 index c822e232da63..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_hash.go +++ /dev/null @@ -1,537 +0,0 @@ -package ledis - -import ( - "encoding/binary" - "errors" - "time" - - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/store" -) - -type FVPair struct { - Field []byte - Value []byte -} - -var errHashKey = errors.New("invalid hash key") -var errHSizeKey = errors.New("invalid hsize key") - -const ( - hashStartSep byte = ':' - hashStopSep byte = hashStartSep + 1 -) - -func checkHashKFSize(key []byte, field []byte) error { - if len(key) > MaxKeySize || len(key) == 0 { - return errKeySize - } else if len(field) > MaxHashFieldSize || len(field) == 0 { - return errHashFieldSize - } - return nil -} - -func (db *DB) hEncodeSizeKey(key []byte) []byte { - buf := make([]byte, len(key)+1+len(db.indexVarBuf)) - - pos := 0 - n := copy(buf, db.indexVarBuf) - - pos += n - buf[pos] = HSizeType - - pos++ - copy(buf[pos:], key) - - return buf -} - -func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, err - } - - if pos+1 > len(ek) || ek[pos] != HSizeType { - return nil, errHSizeKey - } - pos++ - - return ek[pos:], nil -} - -func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte { - buf := make([]byte, len(key)+len(field)+1+1+2+len(db.indexVarBuf)) - - pos := 0 - n := copy(buf, db.indexVarBuf) - pos += n - - buf[pos] = HashType - pos++ - - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - - copy(buf[pos:], key) - 
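-// encoded layout so far: indexVarBuf | HashType | uint16 key length | key; the ':' separator and the field bytes are appended below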
pos += len(key) - - buf[pos] = hashStartSep - pos++ - copy(buf[pos:], field) - - return buf -} - -func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, nil, err - } - - if pos+1 > len(ek) || ek[pos] != HashType { - return nil, nil, errHashKey - } - pos++ - - if pos+2 > len(ek) { - return nil, nil, errHashKey - } - - keyLen := int(binary.BigEndian.Uint16(ek[pos:])) - pos += 2 - - if keyLen+pos > len(ek) { - return nil, nil, errHashKey - } - - key := ek[pos : pos+keyLen] - pos += keyLen - - if ek[pos] != hashStartSep { - return nil, nil, errHashKey - } - - pos++ - field := ek[pos:] - return key, field, nil -} - -func (db *DB) hEncodeStartKey(key []byte) []byte { - return db.hEncodeHashKey(key, nil) -} - -func (db *DB) hEncodeStopKey(key []byte) []byte { - k := db.hEncodeHashKey(key, nil) - - k[len(k)-1] = hashStopSep - - return k -} - -func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) { - t := db.hashBatch - - ek := db.hEncodeHashKey(key, field) - - var n int64 = 1 - if v, _ := db.bucket.Get(ek); v != nil { - n = 0 - } else { - if _, err := db.hIncrSize(key, 1); err != nil { - return 0, err - } - } - - t.Put(ek, value) - return n, nil -} - -// ps : here just focus on deleting the hash data, -// any other likes expire is ignore. -func (db *DB) hDelete(t *batch, key []byte) int64 { - sk := db.hEncodeSizeKey(key) - start := db.hEncodeStartKey(key) - stop := db.hEncodeStopKey(key) - - var num int64 = 0 - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - for ; it.Valid(); it.Next() { - t.Delete(it.Key()) - num++ - } - it.Close() - - t.Delete(sk) - return num -} - -func (db *DB) hExpireAt(key []byte, when int64) (int64, error) { - t := db.hashBatch - t.Lock() - defer t.Unlock() - - if hlen, err := db.HLen(key); err != nil || hlen == 0 { - return 0, err - } else { - db.expireAt(t, HashType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) HLen(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - return Int64(db.bucket.Get(db.hEncodeSizeKey(key))) -} - -func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { - if err := checkHashKFSize(key, field); err != nil { - return 0, err - } else if err := checkValueSize(value); err != nil { - return 0, err - } - - t := db.hashBatch - t.Lock() - defer t.Unlock() - - n, err := db.hSetItem(key, field, value) - if err != nil { - return 0, err - } - - err = t.Commit() - return n, err -} - -func (db *DB) HGet(key []byte, field []byte) ([]byte, error) { - if err := checkHashKFSize(key, field); err != nil { - return nil, err - } - - return db.bucket.Get(db.hEncodeHashKey(key, field)) -} - -func (db *DB) HMset(key []byte, args ...FVPair) error { - t := db.hashBatch - t.Lock() - defer t.Unlock() - - var err error - var ek []byte - var num int64 = 0 - for i := 0; i < len(args); i++ { - if err := checkHashKFSize(key, args[i].Field); err != nil { - return err - } else if err := checkValueSize(args[i].Value); err != nil { - return err - } - - ek = db.hEncodeHashKey(key, args[i].Field) - - if v, err := db.bucket.Get(ek); err != nil { - return err - } else if v == nil { - num++ - } - - t.Put(ek, args[i].Value) - } - - if _, err = db.hIncrSize(key, num); err != nil { - return err - } - - //todo add binglog - err = t.Commit() - return err -} - -func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) { - var ek []byte 
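-// resolve every requested field through one shared iterator (it.Find below) rather than issuing a bucket.Get per field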
- - it := db.bucket.NewIterator() - defer it.Close() - - r := make([][]byte, len(args)) - for i := 0; i < len(args); i++ { - if err := checkHashKFSize(key, args[i]); err != nil { - return nil, err - } - - ek = db.hEncodeHashKey(key, args[i]) - - r[i] = it.Find(ek) - } - - return r, nil -} - -func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) { - t := db.hashBatch - - var ek []byte - var v []byte - var err error - - t.Lock() - defer t.Unlock() - - it := db.bucket.NewIterator() - defer it.Close() - - var num int64 = 0 - for i := 0; i < len(args); i++ { - if err := checkHashKFSize(key, args[i]); err != nil { - return 0, err - } - - ek = db.hEncodeHashKey(key, args[i]) - - v = it.RawFind(ek) - if v == nil { - continue - } else { - num++ - t.Delete(ek) - } - } - - if _, err = db.hIncrSize(key, -num); err != nil { - return 0, err - } - - err = t.Commit() - - return num, err -} - -func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) { - t := db.hashBatch - sk := db.hEncodeSizeKey(key) - - var err error - var size int64 = 0 - if size, err = Int64(db.bucket.Get(sk)); err != nil { - return 0, err - } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, HashType, key) - } else { - t.Put(sk, PutInt64(size)) - } - } - - return size, nil -} - -func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { - if err := checkHashKFSize(key, field); err != nil { - return 0, err - } - - t := db.hashBatch - var ek []byte - var err error - - t.Lock() - defer t.Unlock() - - ek = db.hEncodeHashKey(key, field) - - var n int64 = 0 - if n, err = StrInt64(db.bucket.Get(ek)); err != nil { - return 0, err - } - - n += delta - - _, err = db.hSetItem(key, field, num.FormatInt64ToSlice(n)) - if err != nil { - return 0, err - } - - err = t.Commit() - - return n, err -} - -func (db *DB) HGetAll(key []byte) ([]FVPair, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - start := db.hEncodeStartKey(key) - stop := db.hEncodeStopKey(key) - - v := make([]FVPair, 0, 16) - - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - defer it.Close() - - for ; it.Valid(); it.Next() { - _, f, err := db.hDecodeHashKey(it.Key()) - if err != nil { - return nil, err - } - - v = append(v, FVPair{Field: f, Value: it.Value()}) - } - - return v, nil -} - -func (db *DB) HKeys(key []byte) ([][]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - start := db.hEncodeStartKey(key) - stop := db.hEncodeStopKey(key) - - v := make([][]byte, 0, 16) - - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - defer it.Close() - - for ; it.Valid(); it.Next() { - _, f, err := db.hDecodeHashKey(it.Key()) - if err != nil { - return nil, err - } - v = append(v, f) - } - - return v, nil -} - -func (db *DB) HValues(key []byte) ([][]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - start := db.hEncodeStartKey(key) - stop := db.hEncodeStopKey(key) - - v := make([][]byte, 0, 16) - - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - defer it.Close() - - for ; it.Valid(); it.Next() { - _, _, err := db.hDecodeHashKey(it.Key()) - if err != nil { - return nil, err - } - - v = append(v, it.Value()) - } - - return v, nil -} - -func (db *DB) HClear(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.hashBatch - t.Lock() - defer t.Unlock() - - num := db.hDelete(t, key) - db.rmExpire(t, HashType, key) - 
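-// the field data, the size key and any TTL entry are all queued on the same batch and land in a single commit below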
- err := t.Commit() - return num, err -} - -func (db *DB) HMclear(keys ...[]byte) (int64, error) { - t := db.hashBatch - t.Lock() - defer t.Unlock() - - for _, key := range keys { - if err := checkKeySize(key); err != nil { - return 0, err - } - - db.hDelete(t, key) - db.rmExpire(t, HashType, key) - } - - err := t.Commit() - return int64(len(keys)), err -} - -func (db *DB) hFlush() (drop int64, err error) { - t := db.hashBatch - - t.Lock() - defer t.Unlock() - - return db.flushType(t, HashType) -} - -func (db *DB) HExpire(key []byte, duration int64) (int64, error) { - if duration <= 0 { - return 0, errExpireValue - } - - return db.hExpireAt(key, time.Now().Unix()+duration) -} - -func (db *DB) HExpireAt(key []byte, when int64) (int64, error) { - if when <= time.Now().Unix() { - return 0, errExpireValue - } - - return db.hExpireAt(key, when) -} - -func (db *DB) HTTL(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return -1, err - } - - return db.ttl(HashType, key) -} - -func (db *DB) HPersist(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.hashBatch - t.Lock() - defer t.Unlock() - - n, err := db.rmExpire(t, HashType, key) - if err != nil { - return 0, err - } - - err = t.Commit() - return n, err -} - -func (db *DB) HKeyExists(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - sk := db.hEncodeSizeKey(key) - v, err := db.bucket.Get(sk) - if v != nil && err == nil { - return 1, nil - } - return 0, err -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go b/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go deleted file mode 100644 index 624287fb89fa..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_kv.go +++ /dev/null @@ -1,769 +0,0 @@ -package ledis - -import ( - "encoding/binary" - "errors" - "fmt" - "strings" - "time" - - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/store" -) - -type KVPair struct { - Key []byte - Value []byte -} - -var errKVKey = errors.New("invalid encode kv key") - -func checkKeySize(key []byte) error { - if len(key) > MaxKeySize || len(key) == 0 { - return errKeySize - } - return nil -} - -func checkValueSize(value []byte) error { - if len(value) > MaxValueSize { - return errValueSize - } - - return nil -} - -func (db *DB) encodeKVKey(key []byte) []byte { - ek := make([]byte, len(key)+1+len(db.indexVarBuf)) - pos := copy(ek, db.indexVarBuf) - ek[pos] = KVType - pos++ - copy(ek[pos:], key) - return ek -} - -func (db *DB) decodeKVKey(ek []byte) ([]byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, err - } - if pos+1 > len(ek) || ek[pos] != KVType { - return nil, errKVKey - } - - pos++ - - return ek[pos:], nil -} - -func (db *DB) encodeKVMinKey() []byte { - ek := db.encodeKVKey(nil) - return ek -} - -func (db *DB) encodeKVMaxKey() []byte { - ek := db.encodeKVKey(nil) - ek[len(ek)-1] = KVType + 1 - return ek -} - -func (db *DB) incr(key []byte, delta int64) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - var err error - key = db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - var n int64 - n, err = StrInt64(db.bucket.Get(key)) - if err != nil { - return 0, err - } - - n += delta - - t.Put(key, num.FormatInt64ToSlice(n)) - - err = t.Commit() - return n, err -} - -// ps : here just focus on deleting the key-value data, -// any other likes expire is ignore. 
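-// (callers that track TTLs must pair this with rmExpire, as Del below does)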
-func (db *DB) delete(t *batch, key []byte) int64 { - key = db.encodeKVKey(key) - t.Delete(key) - return 1 -} - -func (db *DB) setExpireAt(key []byte, when int64) (int64, error) { - t := db.kvBatch - t.Lock() - defer t.Unlock() - - if exist, err := db.Exists(key); err != nil || exist == 0 { - return 0, err - } else { - db.expireAt(t, KVType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) Decr(key []byte) (int64, error) { - return db.incr(key, -1) -} - -func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) { - return db.incr(key, -decrement) -} - -func (db *DB) Del(keys ...[]byte) (int64, error) { - if len(keys) == 0 { - return 0, nil - } - - codedKeys := make([][]byte, len(keys)) - for i, k := range keys { - codedKeys[i] = db.encodeKVKey(k) - } - - t := db.kvBatch - t.Lock() - defer t.Unlock() - - for i, k := range keys { - t.Delete(codedKeys[i]) - db.rmExpire(t, KVType, k) - } - - err := t.Commit() - return int64(len(keys)), err -} - -func (db *DB) Exists(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - var err error - key = db.encodeKVKey(key) - - var v []byte - v, err = db.bucket.Get(key) - if v != nil && err == nil { - return 1, nil - } - - return 0, err -} - -func (db *DB) Get(key []byte) ([]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - key = db.encodeKVKey(key) - - return db.bucket.Get(key) -} - -func (db *DB) GetSlice(key []byte) (store.Slice, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - key = db.encodeKVKey(key) - - return db.bucket.GetSlice(key) -} - -func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } else if err := checkValueSize(value); err != nil { - return nil, err - } - - key = db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - oldValue, err := db.bucket.Get(key) - if err != nil { - return nil, err - } - - t.Put(key, value) - - err = t.Commit() - - return oldValue, err -} - -func (db *DB) Incr(key []byte) (int64, error) { - return db.incr(key, 1) -} - -func (db *DB) IncrBy(key []byte, increment int64) (int64, error) { - return db.incr(key, increment) -} - -func (db *DB) MGet(keys ...[]byte) ([][]byte, error) { - values := make([][]byte, len(keys)) - - it := db.bucket.NewIterator() - defer it.Close() - - for i := range keys { - if err := checkKeySize(keys[i]); err != nil { - return nil, err - } - - values[i] = it.Find(db.encodeKVKey(keys[i])) - } - - return values, nil -} - -func (db *DB) MSet(args ...KVPair) error { - if len(args) == 0 { - return nil - } - - t := db.kvBatch - - var err error - var key []byte - var value []byte - - t.Lock() - defer t.Unlock() - - for i := 0; i < len(args); i++ { - if err := checkKeySize(args[i].Key); err != nil { - return err - } else if err := checkValueSize(args[i].Value); err != nil { - return err - } - - key = db.encodeKVKey(args[i].Key) - - value = args[i].Value - - t.Put(key, value) - - } - - err = t.Commit() - return err -} - -func (db *DB) Set(key []byte, value []byte) error { - if err := checkKeySize(key); err != nil { - return err - } else if err := checkValueSize(value); err != nil { - return err - } - - var err error - key = db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - t.Put(key, value) - - err = t.Commit() - - return err -} - -func (db *DB) SetNX(key []byte, value []byte) (int64, error) { - if err := 
checkKeySize(key); err != nil { - return 0, err - } else if err := checkValueSize(value); err != nil { - return 0, err - } - - var err error - key = db.encodeKVKey(key) - - var n int64 = 1 - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - if v, err := db.bucket.Get(key); err != nil { - return 0, err - } else if v != nil { - n = 0 - } else { - t.Put(key, value) - - err = t.Commit() - } - - return n, err -} - -func (db *DB) SetEX(key []byte, duration int64, value []byte) error { - if err := checkKeySize(key); err != nil { - return err - } else if err := checkValueSize(value); err != nil { - return err - } else if duration <= 0 { - return errExpireValue - } - - ek := db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - t.Put(ek, value) - db.expireAt(t, KVType, key, time.Now().Unix()+duration) - - if err := t.Commit(); err != nil { - return err - } - - return nil -} - -func (db *DB) flush() (drop int64, err error) { - t := db.kvBatch - t.Lock() - defer t.Unlock() - return db.flushType(t, KVType) -} - -func (db *DB) Expire(key []byte, duration int64) (int64, error) { - if duration <= 0 { - return 0, errExpireValue - } - - return db.setExpireAt(key, time.Now().Unix()+duration) -} - -func (db *DB) ExpireAt(key []byte, when int64) (int64, error) { - if when <= time.Now().Unix() { - return 0, errExpireValue - } - - return db.setExpireAt(key, when) -} - -func (db *DB) TTL(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return -1, err - } - - return db.ttl(KVType, key) -} - -func (db *DB) Persist(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.kvBatch - t.Lock() - defer t.Unlock() - n, err := db.rmExpire(t, KVType, key) - if err != nil { - return 0, err - } - - err = t.Commit() - return n, err -} - -func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) { - if len(value) == 0 { - return 0, nil - } - - if err := checkKeySize(key); err != nil { - return 0, err - } else if len(value)+offset > MaxValueSize { - return 0, errValueSize - } - - key = db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - oldValue, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - extra := offset + len(value) - len(oldValue) - if extra > 0 { - oldValue = append(oldValue, make([]byte, extra)...) 
- } - - copy(oldValue[offset:], value) - - t.Put(key, oldValue) - - if err := t.Commit(); err != nil { - return 0, err - } - - return int64(len(oldValue)), nil -} - -func getRange(start int, end int, valLen int) (int, int) { - if start < 0 { - start = valLen + start - } - - if end < 0 { - end = valLen + end - } - - if start < 0 { - start = 0 - } - - if end < 0 { - end = 0 - } - - if end >= valLen { - end = valLen - 1 - } - return start, end -} - -func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - key = db.encodeKVKey(key) - - value, err := db.bucket.Get(key) - if err != nil { - return nil, err - } - - valLen := len(value) - - start, end = getRange(start, end, valLen) - - if start > end { - return nil, nil - } - - return value[start : end+1], nil -} - -func (db *DB) StrLen(key []byte) (int64, error) { - s, err := db.GetSlice(key) - if err != nil { - return 0, err - } - - n := s.Size() - s.Free() - return int64(n), nil -} - -func (db *DB) Append(key []byte, value []byte) (int64, error) { - if len(value) == 0 { - return 0, nil - } - - if err := checkKeySize(key); err != nil { - return 0, err - } - key = db.encodeKVKey(key) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - oldValue, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - if len(oldValue)+len(value) > MaxValueSize { - return 0, errValueSize - } - - oldValue = append(oldValue, value...) - - t.Put(key, oldValue) - - if err := t.Commit(); err != nil { - return 0, nil - } - - return int64(len(oldValue)), nil -} - -func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) { - if err := checkKeySize(destKey); err != nil { - return 0, err - } - - op = strings.ToLower(op) - if len(srcKeys) == 0 { - return 0, nil - } else if op == BitNot && len(srcKeys) > 1 { - return 0, fmt.Errorf("BITOP NOT has only one srckey") - } else if len(srcKeys) < 2 { - return 0, nil - } - - key := db.encodeKVKey(srcKeys[0]) - - value, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - if op == BitNot { - for i := 0; i < len(value); i++ { - value[i] = ^value[i] - } - } else { - for j := 1; j < len(srcKeys); j++ { - if err := checkKeySize(srcKeys[j]); err != nil { - return 0, err - } - - key = db.encodeKVKey(srcKeys[j]) - ovalue, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - if len(value) < len(ovalue) { - value, ovalue = ovalue, value - } - - for i := 0; i < len(ovalue); i++ { - switch op { - case BitAND: - value[i] &= ovalue[i] - case BitOR: - value[i] |= ovalue[i] - case BitXOR: - value[i] ^= ovalue[i] - default: - return 0, fmt.Errorf("invalid op type: %s", op) - } - } - - for i := len(ovalue); i < len(value); i++ { - switch op { - case BitAND: - value[i] &= 0 - case BitOR: - value[i] |= 0 - case BitXOR: - value[i] ^= 0 - } - } - } - } - - key = db.encodeKVKey(destKey) - - t := db.kvBatch - - t.Lock() - defer t.Unlock() - - t.Put(key, value) - - if err := t.Commit(); err != nil { - return 0, err - } - - return int64(len(value)), nil -} - -var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, - 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, - 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, - 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, - 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, - 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, - 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 
3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, - 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, - 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, - 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, - 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, - 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8} - -func numberBitCount(i uint32) uint32 { - i = i - ((i >> 1) & 0x55555555) - i = (i & 0x33333333) + ((i >> 2) & 0x33333333) - return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24 -} - -func (db *DB) BitCount(key []byte, start int, end int) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - key = db.encodeKVKey(key) - value, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - start, end = getRange(start, end, len(value)) - value = value[start : end+1] - - var n int64 = 0 - - pos := 0 - for ; pos+4 <= len(value); pos = pos + 4 { - n += int64(numberBitCount(binary.BigEndian.Uint32(value[pos : pos+4]))) - } - - for ; pos < len(value); pos++ { - n += int64(bitsInByte[value[pos]]) - } - - return n, nil -} - -func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - if (on & ^1) != 0 { - return 0, fmt.Errorf("bit must be 0 or 1, not %d", on) - } - - var skipValue uint8 = 0 - if on == 0 { - skipValue = 0xFF - } - - key = db.encodeKVKey(key) - value, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - start, end = getRange(start, end, len(value)) - value = value[start : end+1] - - for i, v := range value { - if uint8(v) != skipValue { - for j := 0; j < 8; j++ { - isNull := uint8(v)&(1<<uint8(7-j)) == 0 - - if (on == 1 && !isNull) || (on == 0 && isNull) { - return int64((start+i)*8 + j), nil - } - } - } - } - - return -1, nil -} - -func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - if (on & ^1) != 0 { - return 0, fmt.Errorf("bit must be 0 or 1, not %d", on) - } - - t := db.kvBatch - t.Lock() - defer t.Unlock() - - key = db.encodeKVKey(key) - value, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - byteOffset := int(uint32(offset) >> 3) - extra := byteOffset + 1 - len(value) - if extra > 0 { - value = append(value, make([]byte, extra)...)
- } - - byteVal := value[byteOffset] - bit := 7 - uint8(uint32(offset)&0x7) - bitVal := byteVal & (1 << bit) - - byteVal &= ^(1 << bit) - byteVal |= (uint8(on&0x1) << bit) - - value[byteOffset] = byteVal - - t.Put(key, value) - if err := t.Commit(); err != nil { - return 0, err - } - - if bitVal > 0 { - return 1, nil - } else { - return 0, nil - } -} - -func (db *DB) GetBit(key []byte, offset int) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - key = db.encodeKVKey(key) - - value, err := db.bucket.Get(key) - if err != nil { - return 0, err - } - - byteOffset := uint32(offset) >> 3 - bit := 7 - uint8(uint32(offset)&0x7) - - if byteOffset >= uint32(len(value)) { - return 0, nil - } - - bitVal := value[byteOffset] & (1 << bit) - if bitVal > 0 { - return 1, nil - } else { - return 0, nil - } -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_list.go b/vendor/github.com/siddontang/ledisdb/ledis/t_list.go deleted file mode 100644 index 83ed56e57fc2..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_list.go +++ /dev/null @@ -1,783 +0,0 @@ -package ledis - -import ( - "container/list" - "encoding/binary" - "errors" - "sync" - "time" - - "github.com/siddontang/go/hack" - "github.com/siddontang/go/log" - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/store" - "golang.org/x/net/context" -) - -const ( - listHeadSeq int32 = 1 - listTailSeq int32 = 2 - - listMinSeq int32 = 1000 - listMaxSeq int32 = 1<<31 - 1000 - listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2 -) - -var errLMetaKey = errors.New("invalid lmeta key") -var errListKey = errors.New("invalid list key") -var errListSeq = errors.New("invalid list sequence, overflow") - -func (db *DB) lEncodeMetaKey(key []byte) []byte { - buf := make([]byte, len(key)+1+len(db.indexVarBuf)) - pos := copy(buf, db.indexVarBuf) - buf[pos] = LMetaType - pos++ - - copy(buf[pos:], key) - return buf -} - -func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, err - } - - if pos+1 > len(ek) || ek[pos] != LMetaType { - return nil, errLMetaKey - } - - pos++ - return ek[pos:], nil -} - -func (db *DB) lEncodeListKey(key []byte, seq int32) []byte { - buf := make([]byte, len(key)+7+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - - buf[pos] = ListType - pos++ - - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - - copy(buf[pos:], key) - pos += len(key) - - binary.BigEndian.PutUint32(buf[pos:], uint32(seq)) - - return buf -} - -func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) { - pos := 0 - pos, err = db.checkKeyIndex(ek) - if err != nil { - return - } - - if pos+1 > len(ek) || ek[pos] != ListType { - err = errListKey - return - } - - pos++ - - if pos+2 > len(ek) { - err = errListKey - return - } - - keyLen := int(binary.BigEndian.Uint16(ek[pos:])) - pos += 2 - if keyLen+pos+4 != len(ek) { - err = errListKey - return - } - - key = ek[pos : pos+keyLen] - seq = int32(binary.BigEndian.Uint32(ek[pos+keyLen:])) - return -} - -func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - var headSeq int32 - var tailSeq int32 - var size int32 - var err error - - t := db.listBatch - t.Lock() - defer t.Unlock() - - metaKey := db.lEncodeMetaKey(key) - headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) - if err != nil { - return 0, err - } - - var pushCnt int = len(args) - if pushCnt 
== 0 { - return int64(size), nil - } - - var seq int32 = headSeq - var delta int32 = -1 - if whereSeq == listTailSeq { - seq = tailSeq - delta = 1 - } - - // append elements - if size > 0 { - seq += delta - } - - for i := 0; i < pushCnt; i++ { - ek := db.lEncodeListKey(key, seq+int32(i)*delta) - t.Put(ek, args[i]) - } - - seq += int32(pushCnt-1) * delta - if seq <= listMinSeq || seq >= listMaxSeq { - return 0, errListSeq - } - - // set meta info - if whereSeq == listHeadSeq { - headSeq = seq - } else { - tailSeq = seq - } - - db.lSetMeta(metaKey, headSeq, tailSeq) - - err = t.Commit() - - if err == nil { - db.lSignalAsReady(key) - } - - return int64(size) + int64(pushCnt), err -} - -func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - t := db.listBatch - t.Lock() - defer t.Unlock() - - var headSeq int32 - var tailSeq int32 - var size int32 - var err error - - metaKey := db.lEncodeMetaKey(key) - headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) - if err != nil { - return nil, err - } else if size == 0 { - return nil, nil - } - - var value []byte - - var seq int32 = headSeq - if whereSeq == listTailSeq { - seq = tailSeq - } - - itemKey := db.lEncodeListKey(key, seq) - value, err = db.bucket.Get(itemKey) - if err != nil { - return nil, err - } - - if whereSeq == listHeadSeq { - headSeq += 1 - } else { - tailSeq -= 1 - } - - t.Delete(itemKey) - size = db.lSetMeta(metaKey, headSeq, tailSeq) - if size == 0 { - db.rmExpire(t, ListType, key) - } - - err = t.Commit() - return value, err -} - -func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) { - if err := checkKeySize(key); err != nil { - return err - } - - t := db.listBatch - t.Lock() - defer t.Unlock() - - var headSeq int32 - var llen int32 - start := int32(startP) - stop := int32(stopP) - - ek := db.lEncodeMetaKey(key) - if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil { - return err - } else { - if start < 0 { - start = llen + start - } - if stop < 0 { - stop = llen + stop - } - if start >= llen || start > stop { - db.lDelete(t, key) - db.rmExpire(t, ListType, key) - return t.Commit() - } - - if start < 0 { - start = 0 - } - if stop >= llen { - stop = llen - 1 - } - } - - if start > 0 { - for i := int32(0); i < start; i++ { - t.Delete(db.lEncodeListKey(key, headSeq+i)) - } - } - if stop < int32(llen-1) { - for i := int32(stop + 1); i < llen; i++ { - t.Delete(db.lEncodeListKey(key, headSeq+i)) - } - } - - db.lSetMeta(ek, headSeq+start, headSeq+stop) - - return t.Commit() -} - -func (db *DB) ltrim(key []byte, trimSize, whereSeq int32) (int32, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - if trimSize == 0 { - return 0, nil - } - - t := db.listBatch - t.Lock() - defer t.Unlock() - - var headSeq int32 - var tailSeq int32 - var size int32 - var err error - - metaKey := db.lEncodeMetaKey(key) - headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) - if err != nil { - return 0, err - } else if size == 0 { - return 0, nil - } - - var ( - trimStartSeq int32 - trimEndSeq int32 - ) - - if whereSeq == listHeadSeq { - trimStartSeq = headSeq - trimEndSeq = num.MinInt32(trimStartSeq+trimSize-1, tailSeq) - headSeq = trimEndSeq + 1 - } else { - trimEndSeq = tailSeq - trimStartSeq = num.MaxInt32(trimEndSeq-trimSize+1, headSeq) - tailSeq = trimStartSeq - 1 - } - - for trimSeq := trimStartSeq; trimSeq <= trimEndSeq; trimSeq++ { - itemKey := db.lEncodeListKey(key, trimSeq) - t.Delete(itemKey) - } - - size = 
db.lSetMeta(metaKey, headSeq, tailSeq) - if size == 0 { - db.rmExpire(t, ListType, key) - } - - err = t.Commit() - return trimEndSeq - trimStartSeq + 1, err -} - -// ps : here just focus on deleting the list data, -// any other likes expire is ignore. -func (db *DB) lDelete(t *batch, key []byte) int64 { - mk := db.lEncodeMetaKey(key) - - var headSeq int32 - var tailSeq int32 - var err error - - it := db.bucket.NewIterator() - defer it.Close() - - headSeq, tailSeq, _, err = db.lGetMeta(it, mk) - if err != nil { - return 0 - } - - var num int64 = 0 - startKey := db.lEncodeListKey(key, headSeq) - stopKey := db.lEncodeListKey(key, tailSeq) - - rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose}) - for ; rit.Valid(); rit.Next() { - t.Delete(rit.RawKey()) - num++ - } - - t.Delete(mk) - - return num -} - -func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) { - var v []byte - if it != nil { - v = it.Find(ek) - } else { - v, err = db.bucket.Get(ek) - } - if err != nil { - return - } else if v == nil { - headSeq = listInitialSeq - tailSeq = listInitialSeq - size = 0 - return - } else { - headSeq = int32(binary.LittleEndian.Uint32(v[0:4])) - tailSeq = int32(binary.LittleEndian.Uint32(v[4:8])) - size = tailSeq - headSeq + 1 - } - return -} - -func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 { - t := db.listBatch - - var size int32 = tailSeq - headSeq + 1 - if size < 0 { - // todo : log error + panic - log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq) - } else if size == 0 { - t.Delete(ek) - } else { - buf := make([]byte, 8) - - binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq)) - binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq)) - - t.Put(ek, buf) - } - - return size -} - -func (db *DB) lExpireAt(key []byte, when int64) (int64, error) { - t := db.listBatch - t.Lock() - defer t.Unlock() - - if llen, err := db.LLen(key); err != nil || llen == 0 { - return 0, err - } else { - db.expireAt(t, ListType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) LIndex(key []byte, index int32) ([]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - var seq int32 - var headSeq int32 - var tailSeq int32 - var err error - - metaKey := db.lEncodeMetaKey(key) - - it := db.bucket.NewIterator() - defer it.Close() - - headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey) - if err != nil { - return nil, err - } - - if index >= 0 { - seq = headSeq + index - } else { - seq = tailSeq + index + 1 - } - - sk := db.lEncodeListKey(key, seq) - v := it.Find(sk) - - return v, nil -} - -func (db *DB) LLen(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - ek := db.lEncodeMetaKey(key) - _, _, size, err := db.lGetMeta(nil, ek) - return int64(size), err -} - -func (db *DB) LPop(key []byte) ([]byte, error) { - return db.lpop(key, listHeadSeq) -} - -func (db *DB) LTrim(key []byte, start, stop int64) error { - return db.ltrim2(key, start, stop) -} - -func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) { - return db.ltrim(key, trimSize, listHeadSeq) -} - -func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) { - return db.ltrim(key, trimSize, listTailSeq) -} - -func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) { - return db.lpush(key, listHeadSeq, args...) 
-} -func (db *DB) LSet(key []byte, index int32, value []byte) error { - if err := checkKeySize(key); err != nil { - return err - } - - var seq int32 - var headSeq int32 - var tailSeq int32 - //var size int32 - var err error - t := db.listBatch - t.Lock() - defer t.Unlock() - metaKey := db.lEncodeMetaKey(key) - - headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey) - if err != nil { - return err - } - - if index >= 0 { - seq = headSeq + index - } else { - seq = tailSeq + index + 1 - } - if seq < headSeq || seq > tailSeq { - return errListIndex - } - sk := db.lEncodeListKey(key, seq) - t.Put(sk, value) - err = t.Commit() - return err -} - -func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - var headSeq int32 - var llen int32 - var err error - - metaKey := db.lEncodeMetaKey(key) - - it := db.bucket.NewIterator() - defer it.Close() - - if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil { - return nil, err - } - - if start < 0 { - start = llen + start - } - if stop < 0 { - stop = llen + stop - } - if start < 0 { - start = 0 - } - - if start > stop || start >= llen { - return [][]byte{}, nil - } - - if stop >= llen { - stop = llen - 1 - } - - limit := (stop - start) + 1 - headSeq += start - - v := make([][]byte, 0, limit) - - startKey := db.lEncodeListKey(key, headSeq) - rit := store.NewRangeLimitIterator(it, - &store.Range{ - Min: startKey, - Max: nil, - Type: store.RangeClose}, - &store.Limit{ - Offset: 0, - Count: int(limit)}) - - for ; rit.Valid(); rit.Next() { - v = append(v, rit.Value()) - } - - return v, nil -} - -func (db *DB) RPop(key []byte) ([]byte, error) { - return db.lpop(key, listTailSeq) -} - -func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) { - return db.lpush(key, listTailSeq, args...) 
-} - -func (db *DB) LClear(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.listBatch - t.Lock() - defer t.Unlock() - - num := db.lDelete(t, key) - db.rmExpire(t, ListType, key) - - err := t.Commit() - return num, err -} - -func (db *DB) LMclear(keys ...[]byte) (int64, error) { - t := db.listBatch - t.Lock() - defer t.Unlock() - - for _, key := range keys { - if err := checkKeySize(key); err != nil { - return 0, err - } - - db.lDelete(t, key) - db.rmExpire(t, ListType, key) - - } - - err := t.Commit() - return int64(len(keys)), err -} - -func (db *DB) lFlush() (drop int64, err error) { - t := db.listBatch - t.Lock() - defer t.Unlock() - return db.flushType(t, ListType) -} - -func (db *DB) LExpire(key []byte, duration int64) (int64, error) { - if duration <= 0 { - return 0, errExpireValue - } - - return db.lExpireAt(key, time.Now().Unix()+duration) -} - -func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { - if when <= time.Now().Unix() { - return 0, errExpireValue - } - - return db.lExpireAt(key, when) -} - -func (db *DB) LTTL(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return -1, err - } - - return db.ttl(ListType, key) -} - -func (db *DB) LPersist(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.listBatch - t.Lock() - defer t.Unlock() - - n, err := db.rmExpire(t, ListType, key) - if err != nil { - return 0, err - } - - err = t.Commit() - return n, err -} - -func (db *DB) lEncodeMinKey() []byte { - return db.lEncodeMetaKey(nil) -} - -func (db *DB) lEncodeMaxKey() []byte { - ek := db.lEncodeMetaKey(nil) - ek[len(ek)-1] = LMetaType + 1 - return ek -} - -func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { - return db.lblockPop(keys, listHeadSeq, timeout) -} - -func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) { - return db.lblockPop(keys, listTailSeq, timeout) -} - -func (db *DB) LKeyExists(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - sk := db.lEncodeMetaKey(key) - v, err := db.bucket.Get(sk) - if v != nil && err == nil { - return 1, nil - } - return 0, err -} - -func (db *DB) lblockPop(keys [][]byte, whereSeq int32, timeout time.Duration) ([]interface{}, error) { - for { - var ctx context.Context - var cancel context.CancelFunc - if timeout > 0 { - ctx, cancel = context.WithTimeout(context.Background(), timeout) - } else { - ctx, cancel = context.WithCancel(context.Background()) - } - - for _, key := range keys { - v, err := db.lbkeys.popOrWait(db, key, whereSeq, cancel) - - if err != nil { - cancel() - return nil, err - } else if v != nil { - cancel() - return []interface{}{key, v}, nil - } - } - - //blocking wait - <-ctx.Done() - cancel() - - //if ctx.Err() is a deadline exceeded (timeout) we return - //otherwise we try to pop one of the keys again. 
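-//the context is canceled either by the timeout above or by lSignalAsReady invoking the CancelFunc that popOrWait registered for the key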
- if ctx.Err() == context.DeadlineExceeded { - return nil, nil - } - } -} - -func (db *DB) lSignalAsReady(key []byte) { - db.lbkeys.signal(key) -} - -type lBlockKeys struct { - sync.Mutex - - keys map[string]*list.List -} - -func newLBlockKeys() *lBlockKeys { - l := new(lBlockKeys) - - l.keys = make(map[string]*list.List) - return l -} - -func (l *lBlockKeys) signal(key []byte) { - l.Lock() - defer l.Unlock() - - s := hack.String(key) - fns, ok := l.keys[s] - if !ok { - return - } - for e := fns.Front(); e != nil; e = e.Next() { - fn := e.Value.(context.CancelFunc) - fn() - } - - delete(l.keys, s) -} - -func (l *lBlockKeys) popOrWait(db *DB, key []byte, whereSeq int32, fn context.CancelFunc) ([]interface{}, error) { - v, err := db.lpop(key, whereSeq) - if err != nil { - return nil, err - } else if v != nil { - return []interface{}{key, v}, nil - } - - l.Lock() - - s := hack.String(key) - chs, ok := l.keys[s] - if !ok { - chs = list.New() - l.keys[s] = chs - } - - chs.PushBack(fn) - l.Unlock() - return nil, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_set.go b/vendor/github.com/siddontang/ledisdb/ledis/t_set.go deleted file mode 100644 index a4eaf9513192..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_set.go +++ /dev/null @@ -1,627 +0,0 @@ -package ledis - -import ( - "encoding/binary" - "errors" - "time" - - "github.com/siddontang/go/hack" - "github.com/siddontang/ledisdb/store" -) - -var errSetKey = errors.New("invalid set key") -var errSSizeKey = errors.New("invalid ssize key") - -const ( - setStartSep byte = ':' - setStopSep byte = setStartSep + 1 - UnionType byte = 51 - DiffType byte = 52 - InterType byte = 53 -) - -func checkSetKMSize(key []byte, member []byte) error { - if len(key) > MaxKeySize || len(key) == 0 { - return errKeySize - } else if len(member) > MaxSetMemberSize || len(member) == 0 { - return errSetMemberSize - } - return nil -} - -func (db *DB) sEncodeSizeKey(key []byte) []byte { - buf := make([]byte, len(key)+1+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - buf[pos] = SSizeType - - pos++ - - copy(buf[pos:], key) - return buf -} - -func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, err - } - - if pos+1 > len(ek) || ek[pos] != SSizeType { - return nil, errSSizeKey - } - pos++ - - return ek[pos:], nil -} - -func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte { - buf := make([]byte, len(key)+len(member)+1+1+2+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - - buf[pos] = SetType - pos++ - - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - - copy(buf[pos:], key) - pos += len(key) - - buf[pos] = setStartSep - pos++ - copy(buf[pos:], member) - - return buf -} - -func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, nil, err - } - - if pos+1 > len(ek) || ek[pos] != SetType { - return nil, nil, errSetKey - } - - pos++ - - if pos+2 > len(ek) { - return nil, nil, errSetKey - } - - keyLen := int(binary.BigEndian.Uint16(ek[pos:])) - pos += 2 - - if keyLen+pos > len(ek) { - return nil, nil, errSetKey - } - - key := ek[pos : pos+keyLen] - pos += keyLen - - if ek[pos] != hashStartSep { - return nil, nil, errSetKey - } - - pos++ - member := ek[pos:] - return key, member, nil -} - -func (db *DB) sEncodeStartKey(key []byte) []byte { - return db.sEncodeSetKey(key, nil) -} - -func (db *DB) sEncodeStopKey(key []byte) []byte { - k := 
db.sEncodeSetKey(key, nil) - - k[len(k)-1] = setStopSep - - return k -} - -func (db *DB) sFlush() (drop int64, err error) { - - t := db.setBatch - t.Lock() - defer t.Unlock() - - return db.flushType(t, SetType) -} - -func (db *DB) sDelete(t *batch, key []byte) int64 { - sk := db.sEncodeSizeKey(key) - start := db.sEncodeStartKey(key) - stop := db.sEncodeStopKey(key) - - var num int64 = 0 - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - for ; it.Valid(); it.Next() { - t.Delete(it.RawKey()) - num++ - } - - it.Close() - - t.Delete(sk) - return num -} - -func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) { - t := db.setBatch - sk := db.sEncodeSizeKey(key) - - var err error - var size int64 = 0 - if size, err = Int64(db.bucket.Get(sk)); err != nil { - return 0, err - } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, SetType, key) - } else { - t.Put(sk, PutInt64(size)) - } - } - - return size, nil -} - -func (db *DB) sExpireAt(key []byte, when int64) (int64, error) { - t := db.setBatch - t.Lock() - defer t.Unlock() - - if scnt, err := db.SCard(key); err != nil || scnt == 0 { - return 0, err - } else { - db.expireAt(t, SetType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - - } - - return 1, nil -} - -func (db *DB) sSetItem(key []byte, member []byte) (int64, error) { - t := db.setBatch - ek := db.sEncodeSetKey(key, member) - - var n int64 = 1 - if v, _ := db.bucket.Get(ek); v != nil { - n = 0 - } else { - if _, err := db.sIncrSize(key, 1); err != nil { - return 0, err - } - } - - t.Put(ek, nil) - return n, nil -} - -func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { - t := db.setBatch - t.Lock() - defer t.Unlock() - - var err error - var ek []byte - var num int64 = 0 - for i := 0; i < len(args); i++ { - if err := checkSetKMSize(key, args[i]); err != nil { - return 0, err - } - - ek = db.sEncodeSetKey(key, args[i]) - - if v, err := db.bucket.Get(ek); err != nil { - return 0, err - } else if v == nil { - num++ - } - - t.Put(ek, nil) - } - - if _, err = db.sIncrSize(key, num); err != nil { - return 0, err - } - - err = t.Commit() - return num, err - -} - -func (db *DB) SCard(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - sk := db.sEncodeSizeKey(key) - - return Int64(db.bucket.Get(sk)) -} - -func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { - destMap := make(map[string]bool) - - members, err := db.SMembers(keys[0]) - if err != nil { - return nil, err - } - - for _, m := range members { - destMap[hack.String(m)] = true - } - - for _, k := range keys[1:] { - members, err := db.SMembers(k) - if err != nil { - return nil, err - } - - for _, m := range members { - if _, ok := destMap[hack.String(m)]; !ok { - continue - } else if ok { - delete(destMap, hack.String(m)) - } - } - // O - A = O, O is zero set. - if len(destMap) == 0 { - return nil, nil - } - } - - slice := make([][]byte, len(destMap)) - idx := 0 - for k, v := range destMap { - if !v { - continue - } - slice[idx] = []byte(k) - idx++ - } - - return slice, nil -} - -func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { - v, err := db.sDiffGeneric(keys...) - return v, err -} - -func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { - n, err := db.sStoreGeneric(dstKey, DiffType, keys...) 
- return n, err -} - -func (db *DB) SKeyExists(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - sk := db.sEncodeSizeKey(key) - v, err := db.bucket.Get(sk) - if v != nil && err == nil { - return 1, nil - } - return 0, err -} - -func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { - destMap := make(map[string]bool) - - members, err := db.SMembers(keys[0]) - if err != nil { - return nil, err - } - - for _, m := range members { - destMap[hack.String(m)] = true - } - - for _, key := range keys[1:] { - if err := checkKeySize(key); err != nil { - return nil, err - } - - members, err := db.SMembers(key) - if err != nil { - return nil, err - } else if len(members) == 0 { - return nil, err - } - - tempMap := make(map[string]bool) - for _, member := range members { - if err := checkKeySize(member); err != nil { - return nil, err - } - if _, ok := destMap[hack.String(member)]; ok { - tempMap[hack.String(member)] = true //mark this item as selected - } - } - destMap = tempMap //reduce the size of the result set - if len(destMap) == 0 { - return nil, nil - } - } - - slice := make([][]byte, len(destMap)) - idx := 0 - for k, v := range destMap { - if !v { - continue - } - - slice[idx] = []byte(k) - idx++ - } - - return slice, nil - -} - -func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { - v, err := db.sInterGeneric(keys...) - return v, err - -} - -func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { - n, err := db.sStoreGeneric(dstKey, InterType, keys...) - return n, err -} - -func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { - ek := db.sEncodeSetKey(key, member) - - var n int64 = 1 - if v, err := db.bucket.Get(ek); err != nil { - return 0, err - } else if v == nil { - n = 0 - } - return n, nil -} - -func (db *DB) SMembers(key []byte) ([][]byte, error) { - if err := checkKeySize(key); err != nil { - return nil, err - } - - start := db.sEncodeStartKey(key) - stop := db.sEncodeStopKey(key) - - v := make([][]byte, 0, 16) - - it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) - defer it.Close() - - for ; it.Valid(); it.Next() { - _, m, err := db.sDecodeSetKey(it.Key()) - if err != nil { - return nil, err - } - - v = append(v, m) - } - - return v, nil -} - -func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { - t := db.setBatch - t.Lock() - defer t.Unlock() - - var ek []byte - var v []byte - var err error - - it := db.bucket.NewIterator() - defer it.Close() - - var num int64 = 0 - for i := 0; i < len(args); i++ { - if err := checkSetKMSize(key, args[i]); err != nil { - return 0, err - } - - ek = db.sEncodeSetKey(key, args[i]) - - v = it.RawFind(ek) - if v == nil { - continue - } else { - num++ - t.Delete(ek) - } - } - - if _, err = db.sIncrSize(key, -num); err != nil { - return 0, err - } - - err = t.Commit() - return num, err - -} - -func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { - dstMap := make(map[string]bool) - - for _, key := range keys { - if err := checkKeySize(key); err != nil { - return nil, err - } - - members, err := db.SMembers(key) - if err != nil { - return nil, err - } - - for _, member := range members { - dstMap[hack.String(member)] = true - } - } - - slice := make([][]byte, len(dstMap)) - idx := 0 - for k, v := range dstMap { - if !v { - continue - } - slice[idx] = []byte(k) - idx++ - } - - return slice, nil -} - -func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { - v, err := db.sUnionGeneric(keys...) 
- return v, err -} - -func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { - n, err := db.sStoreGeneric(dstKey, UnionType, keys...) - return n, err -} - -func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) { - if err := checkKeySize(dstKey); err != nil { - return 0, err - } - - t := db.setBatch - t.Lock() - defer t.Unlock() - - db.sDelete(t, dstKey) - - var err error - var ek []byte - var v [][]byte - - switch optType { - case UnionType: - v, err = db.sUnionGeneric(keys...) - case DiffType: - v, err = db.sDiffGeneric(keys...) - case InterType: - v, err = db.sInterGeneric(keys...) - } - - if err != nil { - return 0, err - } - - for _, m := range v { - if err := checkSetKMSize(dstKey, m); err != nil { - return 0, err - } - - ek = db.sEncodeSetKey(dstKey, m) - - if _, err := db.bucket.Get(ek); err != nil { - return 0, err - } - - t.Put(ek, nil) - } - - var n = int64(len(v)) - sk := db.sEncodeSizeKey(dstKey) - t.Put(sk, PutInt64(n)) - - if err = t.Commit(); err != nil { - return 0, err - } - return n, nil -} - -func (db *DB) SClear(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.setBatch - t.Lock() - defer t.Unlock() - - num := db.sDelete(t, key) - db.rmExpire(t, SetType, key) - - err := t.Commit() - return num, err -} - -func (db *DB) SMclear(keys ...[]byte) (int64, error) { - t := db.setBatch - t.Lock() - defer t.Unlock() - - for _, key := range keys { - if err := checkKeySize(key); err != nil { - return 0, err - } - - db.sDelete(t, key) - db.rmExpire(t, SetType, key) - } - - err := t.Commit() - return int64(len(keys)), err -} - -func (db *DB) SExpire(key []byte, duration int64) (int64, error) { - if duration <= 0 { - return 0, errExpireValue - } - - return db.sExpireAt(key, time.Now().Unix()+duration) - -} - -func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { - if when <= time.Now().Unix() { - return 0, errExpireValue - } - - return db.sExpireAt(key, when) - -} - -func (db *DB) STTL(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return -1, err - } - - return db.ttl(SetType, key) -} - -func (db *DB) SPersist(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.setBatch - t.Lock() - defer t.Unlock() - - n, err := db.rmExpire(t, SetType, key) - if err != nil { - return 0, err - } - err = t.Commit() - return n, err -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go b/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go deleted file mode 100644 index 2c979ae342ed..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go +++ /dev/null @@ -1,213 +0,0 @@ -package ledis - -import ( - "encoding/binary" - "errors" - "sync" - "time" - - "github.com/siddontang/ledisdb/store" -) - -var ( - errExpMetaKey = errors.New("invalid expire meta key") - errExpTimeKey = errors.New("invalid expire time key") -) - -type onExpired func(*batch, []byte) int64 - -type ttlChecker struct { - sync.Mutex - db *DB - txs []*batch - cbs []onExpired - - //next check time - nc int64 -} - -var errExpType = errors.New("invalid expire type") - -func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte { - buf := make([]byte, len(key)+10+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - - buf[pos] = ExpTimeType - pos++ - - binary.BigEndian.PutUint64(buf[pos:], uint64(when)) - pos += 8 - - buf[pos] = dataType - pos++ - - copy(buf[pos:], key) - - return buf -} - -func (db 
*DB) expEncodeMetaKey(dataType byte, key []byte) []byte { - buf := make([]byte, len(key)+2+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - buf[pos] = ExpMetaType - pos++ - buf[pos] = dataType - pos++ - - copy(buf[pos:], key) - - return buf -} - -func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) { - pos, err := db.checkKeyIndex(mk) - if err != nil { - return 0, nil, err - } - - if pos+2 > len(mk) || mk[pos] != ExpMetaType { - return 0, nil, errExpMetaKey - } - - return mk[pos+1], mk[pos+2:], nil -} - -func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) { - pos, err := db.checkKeyIndex(tk) - if err != nil { - return 0, nil, 0, err - } - - if pos+10 > len(tk) || tk[pos] != ExpTimeType { - return 0, nil, 0, errExpTimeKey - } - - return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil -} - -func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) { - db.expireAt(t, dataType, key, time.Now().Unix()+duration) -} - -func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) { - mk := db.expEncodeMetaKey(dataType, key) - tk := db.expEncodeTimeKey(dataType, key, when) - - t.Put(tk, mk) - t.Put(mk, PutInt64(when)) - - db.ttlChecker.setNextCheckTime(when, false) -} - -func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) { - mk := db.expEncodeMetaKey(dataType, key) - - if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 { - t = -1 - } else { - t -= time.Now().Unix() - if t <= 0 { - t = -1 - } - // if t == -1 : to remove ???? - } - - return t, err -} - -func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { - mk := db.expEncodeMetaKey(dataType, key) - if v, err := db.bucket.Get(mk); err != nil { - return 0, err - } else if v == nil { - return 0, nil - } else if when, err2 := Int64(v, nil); err2 != nil { - return 0, err2 - } else { - tk := db.expEncodeTimeKey(dataType, key, when) - t.Delete(mk) - t.Delete(tk) - return 1, nil - } -} - -func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) { - c.txs[dataType] = t - c.cbs[dataType] = f -} - -func (c *ttlChecker) setNextCheckTime(when int64, force bool) { - c.Lock() - if force { - c.nc = when - } else if c.nc > when { - c.nc = when - } - c.Unlock() -} - -func (c *ttlChecker) check() { - now := time.Now().Unix() - - c.Lock() - nc := c.nc - c.Unlock() - - if now < nc { - return - } - - nc = now + 3600 - - db := c.db - dbGet := db.bucket.Get - - minKey := db.expEncodeTimeKey(NoneType, nil, 0) - maxKey := db.expEncodeTimeKey(maxDataType, nil, nc) - - it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1) - for ; it.Valid(); it.Next() { - tk := it.RawKey() - mk := it.RawValue() - - dt, k, nt, err := db.expDecodeTimeKey(tk) - if err != nil { - continue - } - - if nt > now { - //the next ttl check time is nt! 
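-			// time keys carry the big-endian expire timestamp right after
-			// ExpTimeType (see expEncodeTimeKey), so the iterator walks them
-			// in expiration order; the first entry still in the future
-			// bounds the next check time.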
- nc = nt - break - } - - t := c.txs[dt] - cb := c.cbs[dt] - if tk == nil || cb == nil { - continue - } - - t.Lock() - - if exp, err := Int64(dbGet(mk)); err == nil { - // check expire again - if exp <= now { - cb(t, k) - t.Delete(tk) - t.Delete(mk) - - t.Commit() - } - - } - - t.Unlock() - } - it.Close() - - c.setNextCheckTime(nc, true) - - return -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go b/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go deleted file mode 100644 index fe11df872300..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/t_zset.go +++ /dev/null @@ -1,1063 +0,0 @@ -package ledis - -import ( - "bytes" - "encoding/binary" - "errors" - "time" - - "github.com/siddontang/go/hack" - "github.com/siddontang/ledisdb/store" -) - -const ( - MinScore int64 = -1<<63 + 1 - MaxScore int64 = 1<<63 - 1 - InvalidScore int64 = -1 << 63 - - AggregateSum byte = 0 - AggregateMin byte = 1 - AggregateMax byte = 2 -) - -type ScorePair struct { - Score int64 - Member []byte -} - -var errZSizeKey = errors.New("invalid zsize key") -var errZSetKey = errors.New("invalid zset key") -var errZScoreKey = errors.New("invalid zscore key") -var errScoreOverflow = errors.New("zset score overflow") -var errInvalidAggregate = errors.New("invalid aggregate") -var errInvalidWeightNum = errors.New("invalid weight number") -var errInvalidSrcKeyNum = errors.New("invalid src key number") - -const ( - zsetNScoreSep byte = '<' - zsetPScoreSep byte = zsetNScoreSep + 1 - zsetStopScoreSep byte = zsetPScoreSep + 1 - - zsetStartMemSep byte = ':' - zsetStopMemSep byte = zsetStartMemSep + 1 -) - -func checkZSetKMSize(key []byte, member []byte) error { - if len(key) > MaxKeySize || len(key) == 0 { - return errKeySize - } else if len(member) > MaxZSetMemberSize || len(member) == 0 { - return errZSetMemberSize - } - return nil -} - -func (db *DB) zEncodeSizeKey(key []byte) []byte { - buf := make([]byte, len(key)+1+len(db.indexVarBuf)) - pos := copy(buf, db.indexVarBuf) - buf[pos] = ZSizeType - pos++ - copy(buf[pos:], key) - return buf -} - -func (db *DB) zDecodeSizeKey(ek []byte) ([]byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, err - } - - if pos+1 > len(ek) || ek[pos] != ZSizeType { - return nil, errZSizeKey - } - pos++ - return ek[pos:], nil -} - -func (db *DB) zEncodeSetKey(key []byte, member []byte) []byte { - buf := make([]byte, len(key)+len(member)+4+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - - buf[pos] = ZSetType - pos++ - - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - - copy(buf[pos:], key) - pos += len(key) - - buf[pos] = zsetStartMemSep - pos++ - - copy(buf[pos:], member) - - return buf -} - -func (db *DB) zDecodeSetKey(ek []byte) ([]byte, []byte, error) { - pos, err := db.checkKeyIndex(ek) - if err != nil { - return nil, nil, err - } - - if pos+1 > len(ek) || ek[pos] != ZSetType { - return nil, nil, errZSetKey - } - - pos++ - - if pos+2 > len(ek) { - return nil, nil, errZSetKey - } - - keyLen := int(binary.BigEndian.Uint16(ek[pos:])) - if keyLen+pos > len(ek) { - return nil, nil, errZSetKey - } - - pos += 2 - key := ek[pos : pos+keyLen] - - if ek[pos+keyLen] != zsetStartMemSep { - return nil, nil, errZSetKey - } - pos++ - - member := ek[pos+keyLen:] - return key, member, nil -} - -func (db *DB) zEncodeStartSetKey(key []byte) []byte { - k := db.zEncodeSetKey(key, nil) - return k -} - -func (db *DB) zEncodeStopSetKey(key []byte) []byte { - k := db.zEncodeSetKey(key, nil) - k[len(k)-1] = zsetStartMemSep + 1 - 
return k -} - -func (db *DB) zEncodeScoreKey(key []byte, member []byte, score int64) []byte { - buf := make([]byte, len(key)+len(member)+13+len(db.indexVarBuf)) - - pos := copy(buf, db.indexVarBuf) - - buf[pos] = ZScoreType - pos++ - - binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) - pos += 2 - - copy(buf[pos:], key) - pos += len(key) - - if score < 0 { - buf[pos] = zsetNScoreSep - } else { - buf[pos] = zsetPScoreSep - } - - pos++ - binary.BigEndian.PutUint64(buf[pos:], uint64(score)) - pos += 8 - - buf[pos] = zsetStartMemSep - pos++ - - copy(buf[pos:], member) - return buf -} - -func (db *DB) zEncodeStartScoreKey(key []byte, score int64) []byte { - return db.zEncodeScoreKey(key, nil, score) -} - -func (db *DB) zEncodeStopScoreKey(key []byte, score int64) []byte { - k := db.zEncodeScoreKey(key, nil, score) - k[len(k)-1] = zsetStopMemSep - return k -} - -func (db *DB) zDecodeScoreKey(ek []byte) (key []byte, member []byte, score int64, err error) { - pos := 0 - pos, err = db.checkKeyIndex(ek) - if err != nil { - return - } - - if pos+1 > len(ek) || ek[pos] != ZScoreType { - err = errZScoreKey - return - } - pos++ - - if pos+2 > len(ek) { - err = errZScoreKey - return - } - keyLen := int(binary.BigEndian.Uint16(ek[pos:])) - pos += 2 - - if keyLen+pos > len(ek) { - err = errZScoreKey - return - } - - key = ek[pos : pos+keyLen] - pos += keyLen - - if pos+10 > len(ek) { - err = errZScoreKey - return - } - - if (ek[pos] != zsetNScoreSep) && (ek[pos] != zsetPScoreSep) { - err = errZScoreKey - return - } - pos++ - - score = int64(binary.BigEndian.Uint64(ek[pos:])) - pos += 8 - - if ek[pos] != zsetStartMemSep { - err = errZScoreKey - return - } - - pos++ - - member = ek[pos:] - return -} - -func (db *DB) zSetItem(t *batch, key []byte, score int64, member []byte) (int64, error) { - if score <= MinScore || score >= MaxScore { - return 0, errScoreOverflow - } - - var exists int64 = 0 - ek := db.zEncodeSetKey(key, member) - - if v, err := db.bucket.Get(ek); err != nil { - return 0, err - } else if v != nil { - exists = 1 - - if s, err := Int64(v, err); err != nil { - return 0, err - } else { - sk := db.zEncodeScoreKey(key, member, s) - t.Delete(sk) - } - } - - t.Put(ek, PutInt64(score)) - - sk := db.zEncodeScoreKey(key, member, score) - t.Put(sk, []byte{}) - - return exists, nil -} - -func (db *DB) zDelItem(t *batch, key []byte, member []byte, skipDelScore bool) (int64, error) { - ek := db.zEncodeSetKey(key, member) - if v, err := db.bucket.Get(ek); err != nil { - return 0, err - } else if v == nil { - //not exists - return 0, nil - } else { - //exists - if !skipDelScore { - //we must del score - if s, err := Int64(v, err); err != nil { - return 0, err - } else { - sk := db.zEncodeScoreKey(key, member, s) - t.Delete(sk) - } - } - } - - t.Delete(ek) - - return 1, nil -} - -func (db *DB) zDelete(t *batch, key []byte) int64 { - delMembCnt, _ := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) - // todo : log err - return delMembCnt -} - -func (db *DB) zExpireAt(key []byte, when int64) (int64, error) { - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - if zcnt, err := db.ZCard(key); err != nil || zcnt == 0 { - return 0, err - } else { - db.expireAt(t, ZSetType, key, when) - if err := t.Commit(); err != nil { - return 0, err - } - } - return 1, nil -} - -func (db *DB) ZAdd(key []byte, args ...ScorePair) (int64, error) { - if len(args) == 0 { - return 0, nil - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - var num int64 = 0 - for i := 0; i < len(args); i++ { - score := args[i].Score - 
member := args[i].Member - - if err := checkZSetKMSize(key, member); err != nil { - return 0, err - } - - if n, err := db.zSetItem(t, key, score, member); err != nil { - return 0, err - } else if n == 0 { - //add new - num++ - } - } - - if _, err := db.zIncrSize(t, key, num); err != nil { - return 0, err - } - - err := t.Commit() - return num, err -} - -func (db *DB) zIncrSize(t *batch, key []byte, delta int64) (int64, error) { - sk := db.zEncodeSizeKey(key) - - size, err := Int64(db.bucket.Get(sk)) - if err != nil { - return 0, err - } else { - size += delta - if size <= 0 { - size = 0 - t.Delete(sk) - db.rmExpire(t, ZSetType, key) - } else { - t.Put(sk, PutInt64(size)) - } - } - - return size, nil -} - -func (db *DB) ZCard(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - sk := db.zEncodeSizeKey(key) - return Int64(db.bucket.Get(sk)) -} - -func (db *DB) ZScore(key []byte, member []byte) (int64, error) { - if err := checkZSetKMSize(key, member); err != nil { - return InvalidScore, err - } - - var score int64 = InvalidScore - - k := db.zEncodeSetKey(key, member) - if v, err := db.bucket.Get(k); err != nil { - return InvalidScore, err - } else if v == nil { - return InvalidScore, ErrScoreMiss - } else { - if score, err = Int64(v, nil); err != nil { - return InvalidScore, err - } - } - - return score, nil -} - -func (db *DB) ZRem(key []byte, members ...[]byte) (int64, error) { - if len(members) == 0 { - return 0, nil - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - var num int64 = 0 - for i := 0; i < len(members); i++ { - if err := checkZSetKMSize(key, members[i]); err != nil { - return 0, err - } - - if n, err := db.zDelItem(t, key, members[i], false); err != nil { - return 0, err - } else if n == 1 { - num++ - } - } - - if _, err := db.zIncrSize(t, key, -num); err != nil { - return 0, err - } - - err := t.Commit() - return num, err -} - -func (db *DB) ZIncrBy(key []byte, delta int64, member []byte) (int64, error) { - if err := checkZSetKMSize(key, member); err != nil { - return InvalidScore, err - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - ek := db.zEncodeSetKey(key, member) - - var oldScore int64 = 0 - v, err := db.bucket.Get(ek) - if err != nil { - return InvalidScore, err - } else if v == nil { - db.zIncrSize(t, key, 1) - } else { - if oldScore, err = Int64(v, err); err != nil { - return InvalidScore, err - } - } - - newScore := oldScore + delta - if newScore >= MaxScore || newScore <= MinScore { - return InvalidScore, errScoreOverflow - } - - sk := db.zEncodeScoreKey(key, member, newScore) - t.Put(sk, []byte{}) - t.Put(ek, PutInt64(newScore)) - - if v != nil { - // so as to update score, we must delete the old one - oldSk := db.zEncodeScoreKey(key, member, oldScore) - t.Delete(oldSk) - } - - err = t.Commit() - return newScore, err -} - -func (db *DB) ZCount(key []byte, min int64, max int64) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - minKey := db.zEncodeStartScoreKey(key, min) - maxKey := db.zEncodeStopScoreKey(key, max) - - rangeType := store.RangeROpen - - it := db.bucket.RangeLimitIterator(minKey, maxKey, rangeType, 0, -1) - var n int64 = 0 - for ; it.Valid(); it.Next() { - n++ - } - it.Close() - - return n, nil -} - -func (db *DB) zrank(key []byte, member []byte, reverse bool) (int64, error) { - if err := checkZSetKMSize(key, member); err != nil { - return 0, err - } - - k := db.zEncodeSetKey(key, member) - - it := db.bucket.NewIterator() - defer it.Close() - - if v := 
it.Find(k); v == nil { - return -1, nil - } else { - if s, err := Int64(v, nil); err != nil { - return 0, err - } else { - var rit *store.RangeLimitIterator - - sk := db.zEncodeScoreKey(key, member, s) - - if !reverse { - minKey := db.zEncodeStartScoreKey(key, MinScore) - - rit = store.NewRangeIterator(it, &store.Range{minKey, sk, store.RangeClose}) - } else { - maxKey := db.zEncodeStopScoreKey(key, MaxScore) - rit = store.NewRevRangeIterator(it, &store.Range{sk, maxKey, store.RangeClose}) - } - - var lastKey []byte = nil - var n int64 = 0 - - for ; rit.Valid(); rit.Next() { - n++ - - lastKey = rit.BufKey(lastKey) - } - - if _, m, _, err := db.zDecodeScoreKey(lastKey); err == nil && bytes.Equal(m, member) { - n-- - return n, nil - } - } - } - - return -1, nil -} - -func (db *DB) zIterator(key []byte, min int64, max int64, offset int, count int, reverse bool) *store.RangeLimitIterator { - minKey := db.zEncodeStartScoreKey(key, min) - maxKey := db.zEncodeStopScoreKey(key, max) - - if !reverse { - return db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) - } else { - return db.bucket.RevRangeLimitIterator(minKey, maxKey, store.RangeClose, offset, count) - } -} - -func (db *DB) zRemRange(t *batch, key []byte, min int64, max int64, offset int, count int) (int64, error) { - if len(key) > MaxKeySize { - return 0, errKeySize - } - - it := db.zIterator(key, min, max, offset, count, false) - var num int64 = 0 - for ; it.Valid(); it.Next() { - sk := it.RawKey() - _, m, _, err := db.zDecodeScoreKey(sk) - if err != nil { - continue - } - - if n, err := db.zDelItem(t, key, m, true); err != nil { - return 0, err - } else if n == 1 { - num++ - } - - t.Delete(sk) - } - it.Close() - - if _, err := db.zIncrSize(t, key, -num); err != nil { - return 0, err - } - - return num, nil -} - -func (db *DB) zRange(key []byte, min int64, max int64, offset int, count int, reverse bool) ([]ScorePair, error) { - if len(key) > MaxKeySize { - return nil, errKeySize - } - - if offset < 0 { - return []ScorePair{}, nil - } - - nv := count - // count may be very large, so we must limit it for below mem make. - if nv <= 0 || nv > 1024 { - nv = 64 - } - - v := make([]ScorePair, 0, nv) - - var it *store.RangeLimitIterator - - //if reverse and offset is 0, count < 0, we may use forward iterator then reverse - //because store iterator prev is slower than next - if !reverse || (offset == 0 && count < 0) { - it = db.zIterator(key, min, max, offset, count, false) - } else { - it = db.zIterator(key, min, max, offset, count, true) - } - - for ; it.Valid(); it.Next() { - _, m, s, err := db.zDecodeScoreKey(it.Key()) - //may be we will check key equal? 
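-		// a score key that fails to decode is skipped rather than
-		// aborting the whole range read.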
- if err != nil { - continue - } - - v = append(v, ScorePair{Member: m, Score: s}) - } - it.Close() - - if reverse && (offset == 0 && count < 0) { - for i, j := 0, len(v)-1; i < j; i, j = i+1, j-1 { - v[i], v[j] = v[j], v[i] - } - } - - return v, nil -} - -func (db *DB) zParseLimit(key []byte, start int, stop int) (offset int, count int, err error) { - if start < 0 || stop < 0 { - //refer redis implementation - var size int64 - size, err = db.ZCard(key) - if err != nil { - return - } - - llen := int(size) - - if start < 0 { - start = llen + start - } - if stop < 0 { - stop = llen + stop - } - - if start < 0 { - start = 0 - } - - if start >= llen { - offset = -1 - return - } - } - - if start > stop { - offset = -1 - return - } - - offset = start - count = (stop - start) + 1 - return -} - -func (db *DB) ZClear(key []byte) (int64, error) { - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - rmCnt, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1) - if err == nil { - err = t.Commit() - } - - return rmCnt, err -} - -func (db *DB) ZMclear(keys ...[]byte) (int64, error) { - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - for _, key := range keys { - if _, err := db.zRemRange(t, key, MinScore, MaxScore, 0, -1); err != nil { - return 0, err - } - } - - err := t.Commit() - - return int64(len(keys)), err -} - -func (db *DB) ZRange(key []byte, start int, stop int) ([]ScorePair, error) { - return db.ZRangeGeneric(key, start, stop, false) -} - -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 -func (db *DB) ZRangeByScore(key []byte, min int64, max int64, - offset int, count int) ([]ScorePair, error) { - return db.ZRangeByScoreGeneric(key, min, max, offset, count, false) -} - -func (db *DB) ZRank(key []byte, member []byte) (int64, error) { - return db.zrank(key, member, false) -} - -func (db *DB) ZRemRangeByRank(key []byte, start int, stop int) (int64, error) { - offset, count, err := db.zParseLimit(key, start, stop) - if err != nil { - return 0, err - } - - var rmCnt int64 - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - rmCnt, err = db.zRemRange(t, key, MinScore, MaxScore, offset, count) - if err == nil { - err = t.Commit() - } - - return rmCnt, err -} - -//min and max must be inclusive -func (db *DB) ZRemRangeByScore(key []byte, min int64, max int64) (int64, error) { - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - rmCnt, err := db.zRemRange(t, key, min, max, 0, -1) - if err == nil { - err = t.Commit() - } - - return rmCnt, err -} - -func (db *DB) ZRevRange(key []byte, start int, stop int) ([]ScorePair, error) { - return db.ZRangeGeneric(key, start, stop, true) -} - -func (db *DB) ZRevRank(key []byte, member []byte) (int64, error) { - return db.zrank(key, member, true) -} - -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 -func (db *DB) ZRevRangeByScore(key []byte, min int64, max int64, offset int, count int) ([]ScorePair, error) { - return db.ZRangeByScoreGeneric(key, min, max, offset, count, true) -} - -func (db *DB) ZRangeGeneric(key []byte, start int, stop int, reverse bool) ([]ScorePair, error) { - offset, count, err := db.zParseLimit(key, start, stop) - if err != nil { - return nil, err - } - - return db.zRange(key, MinScore, MaxScore, offset, count, reverse) -} - -//min and max must be inclusive -//if no limit, set offset = 0 and count = -1 -func (db *DB) ZRangeByScoreGeneric(key []byte, min int64, max int64, - offset int, count int, reverse bool) ([]ScorePair, error) { - - return db.zRange(key, min, max, offset, 
count, reverse) -} - -func (db *DB) zFlush() (drop int64, err error) { - t := db.zsetBatch - t.Lock() - defer t.Unlock() - return db.flushType(t, ZSetType) -} - -func (db *DB) ZExpire(key []byte, duration int64) (int64, error) { - if duration <= 0 { - return 0, errExpireValue - } - - return db.zExpireAt(key, time.Now().Unix()+duration) -} - -func (db *DB) ZExpireAt(key []byte, when int64) (int64, error) { - if when <= time.Now().Unix() { - return 0, errExpireValue - } - - return db.zExpireAt(key, when) -} - -func (db *DB) ZTTL(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return -1, err - } - - return db.ttl(ZSetType, key) -} - -func (db *DB) ZPersist(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - n, err := db.rmExpire(t, ZSetType, key) - if err != nil { - return 0, err - } - - err = t.Commit() - return n, err -} - -func getAggregateFunc(aggregate byte) func(int64, int64) int64 { - switch aggregate { - case AggregateSum: - return func(a int64, b int64) int64 { - return a + b - } - case AggregateMax: - return func(a int64, b int64) int64 { - if a > b { - return a - } - return b - } - case AggregateMin: - return func(a int64, b int64) int64 { - if a > b { - return b - } - return a - } - } - return nil -} - -func (db *DB) ZUnionStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { - - var destMap = map[string]int64{} - aggregateFunc := getAggregateFunc(aggregate) - if aggregateFunc == nil { - return 0, errInvalidAggregate - } - if len(srcKeys) < 1 { - return 0, errInvalidSrcKeyNum - } - if weights != nil { - if len(srcKeys) != len(weights) { - return 0, errInvalidWeightNum - } - } else { - weights = make([]int64, len(srcKeys)) - for i := 0; i < len(weights); i++ { - weights[i] = 1 - } - } - - for i, key := range srcKeys { - scorePairs, err := db.ZRange(key, 0, -1) - if err != nil { - return 0, err - } - for _, pair := range scorePairs { - if score, ok := destMap[hack.String(pair.Member)]; !ok { - destMap[hack.String(pair.Member)] = pair.Score * weights[i] - } else { - destMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i]) - } - } - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - db.zDelete(t, destKey) - - for member, score := range destMap { - if err := checkZSetKMSize(destKey, []byte(member)); err != nil { - return 0, err - } - - if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { - return 0, err - } - } - - var n = int64(len(destMap)) - sk := db.zEncodeSizeKey(destKey) - t.Put(sk, PutInt64(n)) - - if err := t.Commit(); err != nil { - return 0, err - } - return n, nil -} - -func (db *DB) ZInterStore(destKey []byte, srcKeys [][]byte, weights []int64, aggregate byte) (int64, error) { - - aggregateFunc := getAggregateFunc(aggregate) - if aggregateFunc == nil { - return 0, errInvalidAggregate - } - if len(srcKeys) < 1 { - return 0, errInvalidSrcKeyNum - } - if weights != nil { - if len(srcKeys) != len(weights) { - return 0, errInvalidWeightNum - } - } else { - weights = make([]int64, len(srcKeys)) - for i := 0; i < len(weights); i++ { - weights[i] = 1 - } - } - - var destMap = map[string]int64{} - scorePairs, err := db.ZRange(srcKeys[0], 0, -1) - if err != nil { - return 0, err - } - for _, pair := range scorePairs { - destMap[hack.String(pair.Member)] = pair.Score * weights[0] - } - - for i, key := range srcKeys[1:] { - scorePairs, err := db.ZRange(key, 0, -1) - if err != 
nil { - return 0, err - } - tmpMap := map[string]int64{} - for _, pair := range scorePairs { - if score, ok := destMap[hack.String(pair.Member)]; ok { - tmpMap[hack.String(pair.Member)] = aggregateFunc(score, pair.Score*weights[i+1]) - } - } - destMap = tmpMap - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - db.zDelete(t, destKey) - - for member, score := range destMap { - if err := checkZSetKMSize(destKey, []byte(member)); err != nil { - return 0, err - } - if _, err := db.zSetItem(t, destKey, score, []byte(member)); err != nil { - return 0, err - } - } - - var n int64 = int64(len(destMap)) - sk := db.zEncodeSizeKey(destKey) - t.Put(sk, PutInt64(n)) - - if err := t.Commit(); err != nil { - return 0, err - } - return n, nil -} - -func (db *DB) ZRangeByLex(key []byte, min []byte, max []byte, rangeType uint8, offset int, count int) ([][]byte, error) { - if min == nil { - min = db.zEncodeStartSetKey(key) - } else { - min = db.zEncodeSetKey(key, min) - } - if max == nil { - max = db.zEncodeStopSetKey(key) - } else { - max = db.zEncodeSetKey(key, max) - } - - it := db.bucket.RangeLimitIterator(min, max, rangeType, offset, count) - defer it.Close() - - ay := make([][]byte, 0, 16) - for ; it.Valid(); it.Next() { - if _, m, err := db.zDecodeSetKey(it.Key()); err == nil { - ay = append(ay, m) - } - } - - return ay, nil -} - -func (db *DB) ZRemRangeByLex(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { - if min == nil { - min = db.zEncodeStartSetKey(key) - } else { - min = db.zEncodeSetKey(key, min) - } - if max == nil { - max = db.zEncodeStopSetKey(key) - } else { - max = db.zEncodeSetKey(key, max) - } - - t := db.zsetBatch - t.Lock() - defer t.Unlock() - - it := db.bucket.RangeIterator(min, max, rangeType) - defer it.Close() - - var n int64 = 0 - for ; it.Valid(); it.Next() { - t.Delete(it.RawKey()) - n++ - } - - if err := t.Commit(); err != nil { - return 0, err - } - - return n, nil -} - -func (db *DB) ZLexCount(key []byte, min []byte, max []byte, rangeType uint8) (int64, error) { - if min == nil { - min = db.zEncodeStartSetKey(key) - } else { - min = db.zEncodeSetKey(key, min) - } - if max == nil { - max = db.zEncodeStopSetKey(key) - } else { - max = db.zEncodeSetKey(key, max) - } - - it := db.bucket.RangeIterator(min, max, rangeType) - defer it.Close() - - var n int64 = 0 - for ; it.Valid(); it.Next() { - n++ - } - - return n, nil -} - -func (db *DB) ZKeyExists(key []byte) (int64, error) { - if err := checkKeySize(key); err != nil { - return 0, err - } - sk := db.zEncodeSizeKey(key) - v, err := db.bucket.Get(sk) - if v != nil && err == nil { - return 1, nil - } - return 0, err -} diff --git a/vendor/github.com/siddontang/ledisdb/ledis/util.go b/vendor/github.com/siddontang/ledisdb/ledis/util.go deleted file mode 100644 index 26ee6d08d87b..000000000000 --- a/vendor/github.com/siddontang/ledisdb/ledis/util.go +++ /dev/null @@ -1,95 +0,0 @@ -package ledis - -import ( - "encoding/binary" - "errors" - "strconv" - - "github.com/siddontang/go/hack" -) - -var errIntNumber = errors.New("invalid integer") - -/* - Below I forget why I use little endian to store int. - Maybe I was foolish at that time. 
-*/ - -func Int64(v []byte, err error) (int64, error) { - if err != nil { - return 0, err - } else if v == nil || len(v) == 0 { - return 0, nil - } else if len(v) != 8 { - return 0, errIntNumber - } - - return int64(binary.LittleEndian.Uint64(v)), nil -} - -func Uint64(v []byte, err error) (uint64, error) { - if err != nil { - return 0, err - } else if v == nil || len(v) == 0 { - return 0, nil - } else if len(v) != 8 { - return 0, errIntNumber - } - - return binary.LittleEndian.Uint64(v), nil -} - -func PutInt64(v int64) []byte { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, uint64(v)) - return b -} - -func StrInt64(v []byte, err error) (int64, error) { - if err != nil { - return 0, err - } else if v == nil { - return 0, nil - } else { - return strconv.ParseInt(hack.String(v), 10, 64) - } -} - -func StrUint64(v []byte, err error) (uint64, error) { - if err != nil { - return 0, err - } else if v == nil { - return 0, nil - } else { - return strconv.ParseUint(hack.String(v), 10, 64) - } -} - -func StrInt32(v []byte, err error) (int32, error) { - if err != nil { - return 0, err - } else if v == nil { - return 0, nil - } else { - res, err := strconv.ParseInt(hack.String(v), 10, 32) - return int32(res), err - } -} - -func StrInt8(v []byte, err error) (int8, error) { - if err != nil { - return 0, err - } else if v == nil { - return 0, nil - } else { - res, err := strconv.ParseInt(hack.String(v), 10, 8) - return int8(res), err - } -} - -func AsyncNotify(ch chan struct{}) { - select { - case ch <- struct{}{}: - default: - } -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_io.go b/vendor/github.com/siddontang/ledisdb/rpl/file_io.go deleted file mode 100644 index 6eac87890907..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/file_io.go +++ /dev/null @@ -1,363 +0,0 @@ -package rpl - -import ( - "fmt" - "io" - "os" - - "github.com/edsrzf/mmap-go" - "github.com/siddontang/go/log" -) - -//like leveldb or rocksdb file interface, haha! 
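-// writeFile and readFile below each have two implementations: raw ones
-// backed by plain *os.File calls, and mmap ones backed by a memory map;
-// newWriteFile/newReadFile at the bottom of this file choose between
-// them via the useMmap flag.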
- -type writeFile interface { - Sync() error - Write(b []byte) (n int, err error) - Close() error - ReadAt(buf []byte, offset int64) (int, error) - Truncate(size int64) error - SetOffset(o int64) - Name() string - Size() int - Offset() int64 -} - -type readFile interface { - ReadAt(buf []byte, offset int64) (int, error) - Close() error - Size() int - Name() string -} - -type rawWriteFile struct { - writeFile - f *os.File - offset int64 - name string -} - -func newRawWriteFile(name string, size int64) (writeFile, error) { - m := new(rawWriteFile) - var err error - - m.name = name - - m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return nil, err - } - - return m, nil -} - -func (m *rawWriteFile) Close() error { - if err := m.f.Truncate(m.offset); err != nil { - return fmt.Errorf("close truncate %s error %s", m.name, err.Error()) - } - - if err := m.f.Close(); err != nil { - return fmt.Errorf("close %s error %s", m.name, err.Error()) - } - - return nil -} - -func (m *rawWriteFile) Sync() error { - return m.f.Sync() -} - -func (m *rawWriteFile) Write(b []byte) (n int, err error) { - n, err = m.f.WriteAt(b, m.offset) - if err != nil { - return - } else if n != len(b) { - err = io.ErrShortWrite - return - } - - m.offset += int64(n) - return -} - -func (m *rawWriteFile) ReadAt(buf []byte, offset int64) (int, error) { - return m.f.ReadAt(buf, offset) -} - -func (m *rawWriteFile) Truncate(size int64) error { - var err error - if err = m.f.Truncate(size); err != nil { - return err - } - - if m.offset > size { - m.offset = size - } - return nil -} - -func (m *rawWriteFile) SetOffset(o int64) { - m.offset = o -} - -func (m *rawWriteFile) Offset() int64 { - return m.offset -} - -func (m *rawWriteFile) Name() string { - return m.name -} - -func (m *rawWriteFile) Size() int { - st, _ := m.f.Stat() - return int(st.Size()) -} - -type rawReadFile struct { - readFile - - f *os.File - name string -} - -func newRawReadFile(name string) (readFile, error) { - m := new(rawReadFile) - - var err error - m.f, err = os.Open(name) - m.name = name - - if err != nil { - return nil, err - } - - return m, err -} - -func (m *rawReadFile) Close() error { - return m.f.Close() -} - -func (m *rawReadFile) Size() int { - st, _ := m.f.Stat() - return int(st.Size()) -} - -func (m *rawReadFile) ReadAt(b []byte, offset int64) (int, error) { - return m.f.ReadAt(b, offset) -} - -func (m *rawReadFile) Name() string { - return m.name -} - -///////////////////////////////////////////////// - -type mmapWriteFile struct { - writeFile - - f *os.File - m mmap.MMap - name string - size int64 - offset int64 -} - -func newMmapWriteFile(name string, size int64) (writeFile, error) { - m := new(mmapWriteFile) - - m.name = name - - var err error - - m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return nil, err - } - - if size == 0 { - st, _ := m.f.Stat() - size = st.Size() - } - - if err = m.f.Truncate(size); err != nil { - return nil, err - } - - if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil { - return nil, err - } - - m.size = size - m.offset = 0 - return m, nil -} - -func (m *mmapWriteFile) Size() int { - return int(m.size) -} - -func (m *mmapWriteFile) Sync() error { - return m.m.Flush() -} - -func (m *mmapWriteFile) Close() error { - if err := m.m.Unmap(); err != nil { - return fmt.Errorf("unmap %s error %s", m.name, err.Error()) - } - - if err := m.f.Truncate(m.offset); err != nil { - return fmt.Errorf("close truncate %s error %s", m.name, err.Error()) - } - - if err := 
m.f.Close(); err != nil { - return fmt.Errorf("close %s error %s", m.name, err.Error()) - } - - return nil -} - -func (m *mmapWriteFile) Write(b []byte) (n int, err error) { - extra := int64(len(b)) - (m.size - m.offset) - if extra > 0 { - newSize := m.size + extra + m.size/10 - if err = m.Truncate(newSize); err != nil { - return - } - m.size = newSize - } - - n = copy(m.m[m.offset:], b) - if n != len(b) { - return 0, io.ErrShortWrite - } - - m.offset += int64(len(b)) - return len(b), nil -} - -func (m *mmapWriteFile) ReadAt(buf []byte, offset int64) (int, error) { - if offset > m.offset { - return 0, fmt.Errorf("invalid offset %d", offset) - } - - n := copy(buf, m.m[offset:m.offset]) - if n != len(buf) { - return n, io.ErrUnexpectedEOF - } - - return n, nil -} - -func (m *mmapWriteFile) Truncate(size int64) error { - var err error - if err = m.m.Unmap(); err != nil { - return err - } - - if err = m.f.Truncate(size); err != nil { - return err - } - - if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil { - return err - } - - m.size = size - if m.offset > m.size { - m.offset = m.size - } - return nil -} - -func (m *mmapWriteFile) SetOffset(o int64) { - m.offset = o -} - -func (m *mmapWriteFile) Offset() int64 { - return m.offset -} - -func (m *mmapWriteFile) Name() string { - return m.name -} - -type mmapReadFile struct { - readFile - - f *os.File - m mmap.MMap - name string -} - -func newMmapReadFile(name string) (readFile, error) { - m := new(mmapReadFile) - - m.name = name - - var err error - m.f, err = os.Open(name) - if err != nil { - return nil, err - } - - m.m, err = mmap.Map(m.f, mmap.RDONLY, 0) - return m, err -} - -func (m *mmapReadFile) ReadAt(buf []byte, offset int64) (int, error) { - if int64(offset) > int64(len(m.m)) { - return 0, fmt.Errorf("invalid offset %d", offset) - } - - n := copy(buf, m.m[offset:]) - if n != len(buf) { - return n, io.ErrUnexpectedEOF - } - - return n, nil -} - -func (m *mmapReadFile) Close() error { - if m.m != nil { - if err := m.m.Unmap(); err != nil { - log.Errorf("unmap %s error %s", m.name, err.Error()) - } - m.m = nil - } - - if m.f != nil { - if err := m.f.Close(); err != nil { - log.Errorf("close %s error %s", m.name, err.Error()) - } - m.f = nil - } - - return nil -} - -func (m *mmapReadFile) Size() int { - return len(m.m) -} - -func (m *mmapReadFile) Name() string { - return m.name -} - -///////////////////////////////////// - -func newWriteFile(useMmap bool, name string, size int64) (writeFile, error) { - if useMmap { - return newMmapWriteFile(name, size) - } else { - return newRawWriteFile(name, size) - } -} - -func newReadFile(useMmap bool, name string) (readFile, error) { - if useMmap { - return newMmapReadFile(name) - } else { - return newRawReadFile(name) - } -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_store.go b/vendor/github.com/siddontang/ledisdb/rpl/file_store.go deleted file mode 100644 index f6f708b7c60f..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/file_store.go +++ /dev/null @@ -1,416 +0,0 @@ -package rpl - -import ( - "fmt" - "io/ioutil" - "os" - "sort" - "sync" - "time" - - "github.com/siddontang/go/log" - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/config" -) - -const ( - defaultMaxLogFileSize = int64(256 * 1024 * 1024) - - maxLogFileSize = int64(1024 * 1024 * 1024) - - defaultLogNumInFile = int64(1024 * 1024) -) - -/* - File Store: - 00000001.data - 00000001.meta - 00000002.data - 00000002.meta - - data: log1 data | log2 data | magic data - - if data has no magic data, it 
means that we don't close replication gracefully. - so we must repair the log data - log data: id (bigendian uint64), create time (bigendian uint32), compression (byte), data len(bigendian uint32), data - split data = log0 data + [padding 0] -> file % pagesize() == 0 - - meta: log1 offset | log2 offset - log offset: bigendian uint32 | bigendian uint32 - - //sha1 of github.com/siddontang/ledisdb 20 bytes - magic data = "\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17" - - we must guarantee that the log id is monotonic increment strictly. - if log1's id is 1, log2 must be 2 -*/ - -type FileStore struct { - LogStore - - cfg *config.Config - - base string - - rm sync.RWMutex - wm sync.Mutex - - rs tableReaders - w *tableWriter - - quit chan struct{} -} - -func NewFileStore(base string, cfg *config.Config) (*FileStore, error) { - s := new(FileStore) - - s.quit = make(chan struct{}) - - var err error - - if err = os.MkdirAll(base, 0755); err != nil { - return nil, err - } - - s.base = base - - if cfg.Replication.MaxLogFileSize == 0 { - cfg.Replication.MaxLogFileSize = defaultMaxLogFileSize - } - - cfg.Replication.MaxLogFileSize = num.MinInt64(cfg.Replication.MaxLogFileSize, maxLogFileSize) - - s.cfg = cfg - - if err = s.load(); err != nil { - return nil, err - } - - index := int64(1) - if len(s.rs) != 0 { - index = s.rs[len(s.rs)-1].index + 1 - } - - s.w = newTableWriter(s.base, index, cfg.Replication.MaxLogFileSize, cfg.Replication.UseMmap) - s.w.SetSyncType(cfg.Replication.SyncLog) - - go s.checkTableReaders() - - return s, nil -} - -func (s *FileStore) GetLog(id uint64, l *Log) error { - //first search in table writer - if err := s.w.GetLog(id, l); err == nil { - return nil - } else if err != ErrLogNotFound { - return err - } - - s.rm.RLock() - t := s.rs.Search(id) - - if t == nil { - s.rm.RUnlock() - - return ErrLogNotFound - } - - err := t.GetLog(id, l) - s.rm.RUnlock() - - return err -} - -func (s *FileStore) FirstID() (uint64, error) { - id := uint64(0) - - s.rm.RLock() - if len(s.rs) > 0 { - id = s.rs[0].first - } else { - id = 0 - } - s.rm.RUnlock() - - if id > 0 { - return id, nil - } - - //if id = 0, - - return s.w.First(), nil -} - -func (s *FileStore) LastID() (uint64, error) { - id := s.w.Last() - if id > 0 { - return id, nil - } - - //if table writer has no last id, we may find in the last table reader - - s.rm.RLock() - if len(s.rs) > 0 { - id = s.rs[len(s.rs)-1].last - } - s.rm.RUnlock() - - return id, nil -} - -func (s *FileStore) StoreLog(l *Log) error { - s.wm.Lock() - err := s.storeLog(l) - s.wm.Unlock() - return err -} - -func (s *FileStore) storeLog(l *Log) error { - err := s.w.StoreLog(l) - if err == nil { - return nil - } else if err != errTableNeedFlush { - return err - } - - var r *tableReader - r, err = s.w.Flush() - - if err != nil { - log.Fatalf("write table flush error %s, can not store!!!", err.Error()) - - s.w.Close() - - return err - } - - s.rm.Lock() - s.rs = append(s.rs, r) - s.rm.Unlock() - - err = s.w.StoreLog(l) - - return err -} - -func (s *FileStore) PurgeExpired(n int64) error { - s.rm.Lock() - - var purges []*tableReader - - t := uint32(time.Now().Unix() - int64(n)) - - for i, r := range s.rs { - if r.lastTime > t { - purges = append([]*tableReader{}, s.rs[0:i]...) 
- n := copy(s.rs, s.rs[i:]) - s.rs = s.rs[0:n] - break - } - } - - s.rm.Unlock() - - s.purgeTableReaders(purges) - - return nil -} - -func (s *FileStore) Sync() error { - return s.w.Sync() -} - -func (s *FileStore) Clear() error { - s.wm.Lock() - s.rm.Lock() - - defer func() { - s.rm.Unlock() - s.wm.Unlock() - }() - - s.w.Close() - - for i := range s.rs { - s.rs[i].Close() - } - - s.rs = tableReaders{} - - if err := os.RemoveAll(s.base); err != nil { - return err - } - - if err := os.MkdirAll(s.base, 0755); err != nil { - return err - } - - s.w = newTableWriter(s.base, 1, s.cfg.Replication.MaxLogFileSize, s.cfg.Replication.UseMmap) - - return nil -} - -func (s *FileStore) Close() error { - close(s.quit) - - s.wm.Lock() - s.rm.Lock() - - if r, err := s.w.Flush(); err != nil { - if err != errNilHandler { - log.Errorf("close err: %s", err.Error()) - } - } else { - r.Close() - s.w.Close() - } - - for i := range s.rs { - s.rs[i].Close() - } - - s.rs = tableReaders{} - - s.rm.Unlock() - s.wm.Unlock() - - return nil -} - -func (s *FileStore) checkTableReaders() { - t := time.NewTicker(60 * time.Second) - defer t.Stop() - for { - select { - case <-t.C: - s.rm.Lock() - - for _, r := range s.rs { - if !r.Keepalived() { - r.Close() - } - } - - purges := []*tableReader{} - maxNum := s.cfg.Replication.MaxLogFileNum - num := len(s.rs) - if num > maxNum { - purges = s.rs[:num-maxNum] - s.rs = s.rs[num-maxNum:] - } - - s.rm.Unlock() - - s.purgeTableReaders(purges) - - case <-s.quit: - return - } - } -} - -func (s *FileStore) purgeTableReaders(purges []*tableReader) { - for _, r := range purges { - dataName := fmtTableDataName(r.base, r.index) - metaName := fmtTableMetaName(r.base, r.index) - r.Close() - if err := os.Remove(dataName); err != nil { - log.Errorf("purge table data %s err: %s", dataName, err.Error()) - } - if err := os.Remove(metaName); err != nil { - log.Errorf("purge table meta %s err: %s", metaName, err.Error()) - } - - } -} - -func (s *FileStore) load() error { - fs, err := ioutil.ReadDir(s.base) - if err != nil { - return err - } - - s.rs = make(tableReaders, 0, len(fs)) - - var r *tableReader - var index int64 - for _, f := range fs { - if _, err := fmt.Sscanf(f.Name(), "%08d.data", &index); err == nil { - if r, err = newTableReader(s.base, index, s.cfg.Replication.UseMmap); err != nil { - log.Errorf("load table %s err: %s", f.Name(), err.Error()) - } else { - s.rs = append(s.rs, r) - } - } - } - - if err := s.rs.check(); err != nil { - return err - } - - return nil -} - -type tableReaders []*tableReader - -func (ts tableReaders) Len() int { - return len(ts) -} - -func (ts tableReaders) Swap(i, j int) { - ts[i], ts[j] = ts[j], ts[i] -} - -func (ts tableReaders) Less(i, j int) bool { - return ts[i].first < ts[j].first -} - -func (ts tableReaders) Search(id uint64) *tableReader { - i, j := 0, len(ts)-1 - - for i <= j { - h := i + (j-i)/2 - - if ts[h].first <= id && id <= ts[h].last { - return ts[h] - } else if ts[h].last < id { - i = h + 1 - } else { - j = h - 1 - } - } - - return nil -} - -func (ts tableReaders) check() error { - if len(ts) == 0 { - return nil - } - - sort.Sort(ts) - - first := ts[0].first - last := ts[0].last - index := ts[0].index - - if first == 0 || first > last { - return fmt.Errorf("invalid log in table %s", ts[0]) - } - - for i := 1; i < len(ts); i++ { - if ts[i].first <= last { - return fmt.Errorf("invalid first log id %d in table %s", ts[i].first, ts[i]) - } - - if ts[i].index <= index { - return fmt.Errorf("invalid index %d in table %s", ts[i].index, ts[i]) - } 
- - first = ts[i].first - last = ts[i].last - index = ts[i].index - } - return nil -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/file_table.go b/vendor/github.com/siddontang/ledisdb/rpl/file_table.go deleted file mode 100644 index 9658102ff65c..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/file_table.go +++ /dev/null @@ -1,571 +0,0 @@ -package rpl - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "path" - "sync" - "time" - - "github.com/siddontang/go/log" - "github.com/siddontang/go/sync2" -) - -var ( - magic = []byte("\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17") - errTableNeedFlush = errors.New("write table need flush") - errNilHandler = errors.New("nil write handler") -) - -const tableReaderKeepaliveInterval int64 = 30 - -func fmtTableDataName(base string, index int64) string { - return path.Join(base, fmt.Sprintf("%08d.data", index)) -} - -func fmtTableMetaName(base string, index int64) string { - return path.Join(base, fmt.Sprintf("%08d.meta", index)) -} - -type tableReader struct { - sync.Mutex - - base string - index int64 - - data readFile - meta readFile - - first uint64 - last uint64 - - lastTime uint32 - - lastReadTime sync2.AtomicInt64 - - useMmap bool -} - -func newTableReader(base string, index int64, useMmap bool) (*tableReader, error) { - if index <= 0 { - return nil, fmt.Errorf("invalid index %d", index) - } - t := new(tableReader) - t.base = base - t.index = index - - t.useMmap = useMmap - - var err error - - if err = t.check(); err != nil { - log.Errorf("check %d error: %s, try to repair", t.index, err.Error()) - - if err = t.repair(); err != nil { - log.Errorf("repair %d error: %s", t.index, err.Error()) - return nil, err - } - } - - t.close() - - return t, nil -} - -func (t *tableReader) String() string { - return fmt.Sprintf("%d", t.index) -} - -func (t *tableReader) Close() { - t.Lock() - - t.close() - - t.Unlock() -} - -func (t *tableReader) close() { - if t.data != nil { - t.data.Close() - t.data = nil - } - - if t.meta != nil { - t.meta.Close() - t.meta = nil - } -} - -func (t *tableReader) Keepalived() bool { - l := t.lastReadTime.Get() - if l > 0 && time.Now().Unix()-l > tableReaderKeepaliveInterval { - return false - } - - return true -} - -func (t *tableReader) getLogPos(index int) (uint32, error) { - var buf [4]byte - if _, err := t.meta.ReadAt(buf[0:4], int64(index)*4); err != nil { - return 0, err - } - - return binary.BigEndian.Uint32(buf[0:4]), nil -} - -func (t *tableReader) checkData() error { - var err error - //check will use raw file mode - if t.data, err = newReadFile(false, fmtTableDataName(t.base, t.index)); err != nil { - return err - } - - if t.data.Size() < len(magic) { - return fmt.Errorf("data file %s size %d too short", t.data.Name(), t.data.Size()) - } - - buf := make([]byte, len(magic)) - if _, err := t.data.ReadAt(buf, int64(t.data.Size()-len(magic))); err != nil { - return err - } - - if !bytes.Equal(magic, buf) { - return fmt.Errorf("data file %s invalid magic data %q", t.data.Name(), buf) - } - - return nil -} - -func (t *tableReader) checkMeta() error { - var err error - //check will use raw file mode - if t.meta, err = newReadFile(false, fmtTableMetaName(t.base, t.index)); err != nil { - return err - } - - if t.meta.Size()%4 != 0 || t.meta.Size() == 0 { - return fmt.Errorf("meta file %s invalid offset len %d, must 4 multiple and not 0", t.meta.Name(), t.meta.Size()) - } - - return nil -} - -func (t *tableReader) check() error { - var err error - - if 
err := t.checkMeta(); err != nil { - return err - } - - if err := t.checkData(); err != nil { - return err - } - - firstLogPos, _ := t.getLogPos(0) - lastLogPos, _ := t.getLogPos(t.meta.Size()/4 - 1) - - if firstLogPos != 0 { - return fmt.Errorf("invalid first log pos %d, must 0", firstLogPos) - } - - var l Log - if _, err = t.decodeLogHead(&l, t.data, int64(firstLogPos)); err != nil { - return fmt.Errorf("decode first log err %s", err.Error()) - } - - t.first = l.ID - var n int64 - if n, err = t.decodeLogHead(&l, t.data, int64(lastLogPos)); err != nil { - return fmt.Errorf("decode last log err %s", err.Error()) - } else if n+int64(len(magic)) != int64(t.data.Size()) { - return fmt.Errorf("extra log data at offset %d", n) - } - - t.last = l.ID - t.lastTime = l.CreateTime - - if t.first > t.last { - return fmt.Errorf("invalid log table first %d > last %d", t.first, t.last) - } else if (t.last - t.first + 1) != uint64(t.meta.Size()/4) { - return fmt.Errorf("invalid log table, first %d, last %d, and log num %d", t.first, t.last, t.meta.Size()/4) - } - - return nil -} - -func (t *tableReader) repair() error { - t.close() - - var err error - var data writeFile - var meta writeFile - - //repair will use raw file mode - data, err = newWriteFile(false, fmtTableDataName(t.base, t.index), 0) - data.SetOffset(int64(data.Size())) - - meta, err = newWriteFile(false, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)) - - var l Log - var pos int64 = 0 - var nextPos int64 = 0 - b := make([]byte, 4) - - t.first = 0 - t.last = 0 - - for { - nextPos, err = t.decodeLogHead(&l, data, pos) - if err != nil { - //if error, we may lost all logs from pos - log.Errorf("%s may lost logs from %d", data.Name(), pos) - break - } - - if l.ID == 0 { - log.Errorf("%s may lost logs from %d, invalid log 0", data.Name(), pos) - break - } - - if t.first == 0 { - t.first = l.ID - } - - if t.last == 0 { - t.last = l.ID - } else if l.ID <= t.last { - log.Errorf("%s may lost logs from %d, invalid logid %d", t.data.Name(), pos, l.ID) - break - } - - t.last = l.ID - t.lastTime = l.CreateTime - - binary.BigEndian.PutUint32(b, uint32(pos)) - meta.Write(b) - - pos = nextPos - - t.lastTime = l.CreateTime - } - - var e error - if err := meta.Close(); err != nil { - e = err - } - - data.SetOffset(pos) - - if _, err = data.Write(magic); err != nil { - log.Errorf("write magic error %s", err.Error()) - } - - if err = data.Close(); err != nil { - return err - } - - return e -} - -func (t *tableReader) decodeLogHead(l *Log, r io.ReaderAt, pos int64) (int64, error) { - dataLen, err := l.DecodeHeadAt(r, pos) - if err != nil { - return 0, err - } - - return pos + int64(l.HeadSize()) + int64(dataLen), nil -} - -func (t *tableReader) GetLog(id uint64, l *Log) error { - if id < t.first || id > t.last { - return ErrLogNotFound - } - - t.lastReadTime.Set(time.Now().Unix()) - - t.Lock() - - if err := t.openTable(); err != nil { - t.close() - t.Unlock() - return err - } - t.Unlock() - - pos, err := t.getLogPos(int(id - t.first)) - if err != nil { - return err - } - - if err := l.DecodeAt(t.data, int64(pos)); err != nil { - return err - } else if l.ID != id { - return fmt.Errorf("invalid log id %d != %d", l.ID, id) - } - - return nil -} - -func (t *tableReader) openTable() error { - var err error - if t.data == nil { - if t.data, err = newReadFile(t.useMmap, fmtTableDataName(t.base, t.index)); err != nil { - return err - } - } - - if t.meta == nil { - if t.meta, err = newReadFile(t.useMmap, fmtTableMetaName(t.base, t.index)); err != nil { - 
return err - } - - } - - return nil -} - -type tableWriter struct { - sync.RWMutex - - data writeFile - meta writeFile - - base string - index int64 - - first uint64 - last uint64 - lastTime uint32 - - maxLogSize int64 - - closed bool - - syncType int - - posBuf []byte - - useMmap bool -} - -func newTableWriter(base string, index int64, maxLogSize int64, useMmap bool) *tableWriter { - if index <= 0 { - panic(fmt.Errorf("invalid index %d", index)) - } - - t := new(tableWriter) - - t.base = base - t.index = index - - t.maxLogSize = maxLogSize - - t.closed = false - - t.posBuf = make([]byte, 4) - - t.useMmap = useMmap - - return t -} - -func (t *tableWriter) String() string { - return fmt.Sprintf("%d", t.index) -} - -func (t *tableWriter) SetMaxLogSize(s int64) { - t.maxLogSize = s -} - -func (t *tableWriter) SetSyncType(tp int) { - t.syncType = tp -} - -func (t *tableWriter) close() { - if t.meta != nil { - if err := t.meta.Close(); err != nil { - log.Fatalf("close log meta error %s", err.Error()) - } - t.meta = nil - } - - if t.data != nil { - if _, err := t.data.Write(magic); err != nil { - log.Fatalf("write magic error %s", err.Error()) - } - - if err := t.data.Close(); err != nil { - log.Fatalf("close log data error %s", err.Error()) - } - t.data = nil - } -} - -func (t *tableWriter) Close() { - t.Lock() - t.closed = true - - t.close() - t.Unlock() -} - -func (t *tableWriter) First() uint64 { - t.Lock() - id := t.first - t.Unlock() - return id -} - -func (t *tableWriter) Last() uint64 { - t.Lock() - id := t.last - t.Unlock() - return id -} - -func (t *tableWriter) Flush() (*tableReader, error) { - t.Lock() - - if t.data == nil || t.meta == nil { - t.Unlock() - return nil, errNilHandler - } - - tr := new(tableReader) - tr.base = t.base - tr.index = t.index - - tr.first = t.first - tr.last = t.last - tr.lastTime = t.lastTime - tr.useMmap = t.useMmap - - t.close() - - t.first = 0 - t.last = 0 - t.index = t.index + 1 - - t.Unlock() - - return tr, nil -} - -func (t *tableWriter) StoreLog(l *Log) error { - t.Lock() - err := t.storeLog(l) - t.Unlock() - - return err -} - -func (t *tableWriter) openFile() error { - var err error - if t.data == nil { - if t.data, err = newWriteFile(t.useMmap, fmtTableDataName(t.base, t.index), t.maxLogSize+t.maxLogSize/10+int64(len(magic))); err != nil { - return err - } - } - - if t.meta == nil { - if t.meta, err = newWriteFile(t.useMmap, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)); err != nil { - return err - } - } - return err -} - -func (t *tableWriter) storeLog(l *Log) error { - if l.ID == 0 { - return ErrStoreLogID - } - - if t.closed { - return fmt.Errorf("table writer is closed") - } - - if t.last > 0 && l.ID != t.last+1 { - return ErrStoreLogID - } - - if t.data != nil && t.data.Offset() > t.maxLogSize { - return errTableNeedFlush - } - - var err error - if err = t.openFile(); err != nil { - return err - } - - offsetPos := t.data.Offset() - if err = l.Encode(t.data); err != nil { - return err - } - - binary.BigEndian.PutUint32(t.posBuf, uint32(offsetPos)) - if _, err = t.meta.Write(t.posBuf); err != nil { - return err - } - - if t.first == 0 { - t.first = l.ID - } - - t.last = l.ID - t.lastTime = l.CreateTime - - if t.syncType == 2 { - if err := t.data.Sync(); err != nil { - log.Errorf("sync table error %s", err.Error()) - } - } - - return nil -} - -func (t *tableWriter) GetLog(id uint64, l *Log) error { - t.RLock() - defer t.RUnlock() - - if id < t.first || id > t.last { - return ErrLogNotFound - } - - var buf [4]byte - if _, err := 
t.meta.ReadAt(buf[0:4], int64((id-t.first)*4)); err != nil { - return err - } - - offset := binary.BigEndian.Uint32(buf[0:4]) - - if err := l.DecodeAt(t.data, int64(offset)); err != nil { - return err - } else if l.ID != id { - return fmt.Errorf("invalid log id %d != %d", id, l.ID) - } - - return nil -} - -func (t *tableWriter) Sync() error { - t.Lock() - - var err error - if t.data != nil { - err = t.data.Sync() - t.Unlock() - return err - } - - if t.meta != nil { - err = t.meta.Sync() - } - - t.Unlock() - - return err -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go b/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go deleted file mode 100644 index 445c17cfcf68..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go +++ /dev/null @@ -1,225 +0,0 @@ -package rpl - -import ( - "bytes" - "fmt" - "os" - "sync" - "time" - - "github.com/siddontang/go/num" - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store" -) - -type GoLevelDBStore struct { - LogStore - - m sync.Mutex - db *store.DB - - cfg *config.Config - - first uint64 - last uint64 - - buf bytes.Buffer -} - -func (s *GoLevelDBStore) FirstID() (uint64, error) { - s.m.Lock() - id, err := s.firstID() - s.m.Unlock() - - return id, err -} - -func (s *GoLevelDBStore) LastID() (uint64, error) { - s.m.Lock() - id, err := s.lastID() - s.m.Unlock() - - return id, err -} - -func (s *GoLevelDBStore) firstID() (uint64, error) { - if s.first != InvalidLogID { - return s.first, nil - } - - it := s.db.NewIterator() - defer it.Close() - - it.SeekToFirst() - - if it.Valid() { - s.first = num.BytesToUint64(it.RawKey()) - } - - return s.first, nil -} - -func (s *GoLevelDBStore) lastID() (uint64, error) { - if s.last != InvalidLogID { - return s.last, nil - } - - it := s.db.NewIterator() - defer it.Close() - - it.SeekToLast() - - if it.Valid() { - s.last = num.BytesToUint64(it.RawKey()) - } - - return s.last, nil -} - -func (s *GoLevelDBStore) GetLog(id uint64, log *Log) error { - v, err := s.db.Get(num.Uint64ToBytes(id)) - if err != nil { - return err - } else if v == nil { - return ErrLogNotFound - } else { - return log.Decode(bytes.NewBuffer(v)) - } -} - -func (s *GoLevelDBStore) StoreLog(log *Log) error { - s.m.Lock() - defer s.m.Unlock() - - last, err := s.lastID() - if err != nil { - return err - } - - s.last = InvalidLogID - - s.buf.Reset() - - if log.ID != last+1 { - return ErrStoreLogID - } - - last = log.ID - key := num.Uint64ToBytes(log.ID) - - if err := log.Encode(&s.buf); err != nil { - return err - } - - if err = s.db.Put(key, s.buf.Bytes()); err != nil { - return err - } - - s.last = last - return nil -} - -func (s *GoLevelDBStore) PurgeExpired(n int64) error { - if n <= 0 { - return fmt.Errorf("invalid expired time %d", n) - } - - t := uint32(time.Now().Unix() - int64(n)) - - s.m.Lock() - defer s.m.Unlock() - - s.reset() - - it := s.db.NewIterator() - it.SeekToFirst() - - w := s.db.NewWriteBatch() - defer w.Rollback() - - l := new(Log) - for ; it.Valid(); it.Next() { - v := it.RawValue() - - if err := l.Unmarshal(v); err != nil { - return err - } else if l.CreateTime > t { - break - } else { - w.Delete(it.RawKey()) - } - } - - if err := w.Commit(); err != nil { - return err - } - - return nil -} - -func (s *GoLevelDBStore) Sync() error { - //no other way for sync, so ignore here - return nil -} - -func (s *GoLevelDBStore) reset() { - s.first = InvalidLogID - s.last = InvalidLogID -} - -func (s *GoLevelDBStore) Clear() error { - s.m.Lock() - defer 
s.m.Unlock() - - if s.db != nil { - s.db.Close() - } - - s.reset() - os.RemoveAll(s.cfg.DBPath) - - return s.open() -} - -func (s *GoLevelDBStore) Close() error { - s.m.Lock() - defer s.m.Unlock() - - if s.db == nil { - return nil - } - - err := s.db.Close() - s.db = nil - return err -} - -func (s *GoLevelDBStore) open() error { - var err error - - s.first = InvalidLogID - s.last = InvalidLogID - - s.db, err = store.Open(s.cfg) - return err -} - -func NewGoLevelDBStore(base string, syncLog int) (*GoLevelDBStore, error) { - cfg := config.NewConfigDefault() - cfg.DBName = "goleveldb" - cfg.DBPath = base - cfg.LevelDB.BlockSize = 16 * 1024 * 1024 - cfg.LevelDB.CacheSize = 64 * 1024 * 1024 - cfg.LevelDB.WriteBufferSize = 64 * 1024 * 1024 - cfg.LevelDB.Compression = false - cfg.DBSyncCommit = syncLog - - s := new(GoLevelDBStore) - s.cfg = cfg - - if err := s.open(); err != nil { - return nil, err - } - - return s, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/log.go b/vendor/github.com/siddontang/ledisdb/rpl/log.go deleted file mode 100644 index ad0b48cd4f79..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/log.go +++ /dev/null @@ -1,167 +0,0 @@ -package rpl - -import ( - "bytes" - "encoding/binary" - "io" - "sync" -) - -const LogHeadSize = 17 - -type Log struct { - ID uint64 - CreateTime uint32 - Compression uint8 - - Data []byte -} - -func (l *Log) HeadSize() int { - return LogHeadSize -} - -func (l *Log) Size() int { - return l.HeadSize() + len(l.Data) -} - -func (l *Log) Marshal() ([]byte, error) { - buf := bytes.NewBuffer(make([]byte, l.Size())) - buf.Reset() - - if err := l.Encode(buf); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (l *Log) Unmarshal(b []byte) error { - buf := bytes.NewBuffer(b) - - return l.Decode(buf) -} - -var headPool = sync.Pool{ - New: func() interface{} { return make([]byte, LogHeadSize) }, -} - -func (l *Log) Encode(w io.Writer) error { - b := headPool.Get().([]byte) - pos := 0 - - binary.BigEndian.PutUint64(b[pos:], l.ID) - pos += 8 - binary.BigEndian.PutUint32(b[pos:], uint32(l.CreateTime)) - pos += 4 - b[pos] = l.Compression - pos++ - binary.BigEndian.PutUint32(b[pos:], uint32(len(l.Data))) - - n, err := w.Write(b) - headPool.Put(b) - - if err != nil { - return err - } else if n != LogHeadSize { - return io.ErrShortWrite - } - - if n, err = w.Write(l.Data); err != nil { - return err - } else if n != len(l.Data) { - return io.ErrShortWrite - } - return nil -} - -func (l *Log) Decode(r io.Reader) error { - length, err := l.DecodeHead(r) - if err != nil { - return err - } - - l.growData(int(length)) - - if _, err := io.ReadFull(r, l.Data); err != nil { - return err - } - - return nil -} - -func (l *Log) DecodeHead(r io.Reader) (uint32, error) { - buf := headPool.Get().([]byte) - - if _, err := io.ReadFull(r, buf); err != nil { - headPool.Put(buf) - return 0, err - } - - length := l.decodeHeadBuf(buf) - - headPool.Put(buf) - - return length, nil -} - -func (l *Log) DecodeAt(r io.ReaderAt, pos int64) error { - length, err := l.DecodeHeadAt(r, pos) - if err != nil { - return err - } - - l.growData(int(length)) - var n int - n, err = r.ReadAt(l.Data, pos+int64(LogHeadSize)) - if err == io.EOF && n == len(l.Data) { - err = nil - } - - return err -} - -func (l *Log) growData(length int) { - l.Data = l.Data[0:0] - - if cap(l.Data) >= length { - l.Data = l.Data[0:length] - } else { - l.Data = make([]byte, length) - } -} - -func (l *Log) DecodeHeadAt(r io.ReaderAt, pos int64) (uint32, error) { - buf := 
headPool.Get().([]byte) - - n, err := r.ReadAt(buf, pos) - if err != nil && err != io.EOF { - headPool.Put(buf) - - return 0, err - } - - length := l.decodeHeadBuf(buf) - headPool.Put(buf) - - if err == io.EOF && (length != 0 || n != len(buf)) { - return 0, err - } - - return length, nil -} - -func (l *Log) decodeHeadBuf(buf []byte) uint32 { - pos := 0 - l.ID = binary.BigEndian.Uint64(buf[pos:]) - pos += 8 - - l.CreateTime = binary.BigEndian.Uint32(buf[pos:]) - pos += 4 - - l.Compression = uint8(buf[pos]) - pos++ - - length := binary.BigEndian.Uint32(buf[pos:]) - return length -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/rpl.go b/vendor/github.com/siddontang/ledisdb/rpl/rpl.go deleted file mode 100644 index 0ebf66de413c..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/rpl.go +++ /dev/null @@ -1,336 +0,0 @@ -package rpl - -import ( - "encoding/binary" - "os" - "path" - "sync" - "time" - - "github.com/siddontang/go/log" - "github.com/siddontang/go/snappy" - "github.com/siddontang/ledisdb/config" -) - -type Stat struct { - FirstID uint64 - LastID uint64 - CommitID uint64 -} - -type Replication struct { - m sync.Mutex - - cfg *config.Config - - s LogStore - - commitID uint64 - commitLog *os.File - - quit chan struct{} - - wg sync.WaitGroup - - nc chan struct{} - - ncm sync.Mutex -} - -func NewReplication(cfg *config.Config) (*Replication, error) { - if len(cfg.Replication.Path) == 0 { - cfg.Replication.Path = path.Join(cfg.DataDir, "rpl") - } - - base := cfg.Replication.Path - - r := new(Replication) - - r.quit = make(chan struct{}) - r.nc = make(chan struct{}) - - r.cfg = cfg - - var err error - - switch cfg.Replication.StoreName { - case "goleveldb": - if r.s, err = NewGoLevelDBStore(path.Join(base, "wal"), cfg.Replication.SyncLog); err != nil { - return nil, err - } - default: - if r.s, err = NewFileStore(path.Join(base, "ldb"), cfg); err != nil { - return nil, err - } - } - - if r.commitLog, err = os.OpenFile(path.Join(base, "commit.log"), os.O_RDWR|os.O_CREATE, 0644); err != nil { - return nil, err - } - - if s, _ := r.commitLog.Stat(); s.Size() == 0 { - r.commitID = 0 - } else if err = binary.Read(r.commitLog, binary.BigEndian, &r.commitID); err != nil { - return nil, err - } - - log.Infof("staring replication with commit ID %d", r.commitID) - - r.wg.Add(1) - go r.run() - - return r, nil -} - -func (r *Replication) Close() error { - close(r.quit) - - r.wg.Wait() - - r.m.Lock() - defer r.m.Unlock() - - log.Infof("closing replication with commit ID %d", r.commitID) - - if r.s != nil { - r.s.Close() - r.s = nil - } - - if err := r.updateCommitID(r.commitID, true); err != nil { - log.Errorf("update commit id err %s", err.Error()) - } - - if r.commitLog != nil { - r.commitLog.Close() - r.commitLog = nil - } - - return nil -} - -func (r *Replication) Log(data []byte) (*Log, error) { - if r.cfg.Replication.Compression { - //todo optimize - var err error - if data, err = snappy.Encode(nil, data); err != nil { - return nil, err - } - } - - r.m.Lock() - - lastID, err := r.s.LastID() - if err != nil { - r.m.Unlock() - return nil, err - } - - commitId := r.commitID - if lastID < commitId { - lastID = commitId - } else if lastID > commitId { - r.m.Unlock() - return nil, ErrCommitIDBehind - } - - l := new(Log) - l.ID = lastID + 1 - l.CreateTime = uint32(time.Now().Unix()) - - if r.cfg.Replication.Compression { - l.Compression = 1 - } else { - l.Compression = 0 - } - - l.Data = data - - if err = r.s.StoreLog(l); err != nil { - r.m.Unlock() - return nil, err - } - - r.m.Unlock() 
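// The surrounding append path keeps the WAL dense and ordered: Log()
// bases the new ID on max(lastID, commitID) and always assigns
// lastID+1, refusing with ErrCommitIDBehind while earlier entries are
// still uncommitted. The block below is the broadcast half of
// WaitLog(): closing r.nc wakes every waiter at once, and a fresh
// channel is installed for the next append. An illustrative consumer
// loop (a sketch, not part of the original file):
//
//	for {
//		<-r.WaitLog()
//		var l Log
//		for r.NextNeedCommitLog(&l) == nil {
//			// apply l here, then mark it done:
//			r.UpdateCommitID(l.ID)
//		}
//	}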
- - r.ncm.Lock() - close(r.nc) - r.nc = make(chan struct{}) - r.ncm.Unlock() - - return l, nil -} - -func (r *Replication) WaitLog() <-chan struct{} { - r.ncm.Lock() - ch := r.nc - r.ncm.Unlock() - return ch -} - -func (r *Replication) StoreLog(log *Log) error { - r.m.Lock() - err := r.s.StoreLog(log) - r.m.Unlock() - - return err -} - -func (r *Replication) FirstLogID() (uint64, error) { - r.m.Lock() - id, err := r.s.FirstID() - r.m.Unlock() - - return id, err -} - -func (r *Replication) LastLogID() (uint64, error) { - r.m.Lock() - id, err := r.s.LastID() - r.m.Unlock() - return id, err -} - -func (r *Replication) LastCommitID() (uint64, error) { - r.m.Lock() - id := r.commitID - r.m.Unlock() - return id, nil -} - -func (r *Replication) UpdateCommitID(id uint64) error { - r.m.Lock() - err := r.updateCommitID(id, r.cfg.Replication.SyncLog == 2) - r.m.Unlock() - - return err -} - -func (r *Replication) Stat() (*Stat, error) { - r.m.Lock() - defer r.m.Unlock() - - s := &Stat{} - var err error - - if s.FirstID, err = r.s.FirstID(); err != nil { - return nil, err - } - - if s.LastID, err = r.s.LastID(); err != nil { - return nil, err - } - - s.CommitID = r.commitID - return s, nil -} - -func (r *Replication) updateCommitID(id uint64, force bool) error { - if force { - if _, err := r.commitLog.Seek(0, os.SEEK_SET); err != nil { - return err - } - - if err := binary.Write(r.commitLog, binary.BigEndian, id); err != nil { - return err - } - } - - r.commitID = id - - return nil -} - -func (r *Replication) CommitIDBehind() (bool, error) { - r.m.Lock() - - id, err := r.s.LastID() - if err != nil { - r.m.Unlock() - return false, err - } - - behind := id > r.commitID - r.m.Unlock() - - return behind, nil -} - -func (r *Replication) GetLog(id uint64, log *Log) error { - return r.s.GetLog(id, log) -} - -func (r *Replication) NextNeedCommitLog(log *Log) error { - r.m.Lock() - defer r.m.Unlock() - - id, err := r.s.LastID() - if err != nil { - return err - } - - if id <= r.commitID { - return ErrNoBehindLog - } - - return r.s.GetLog(r.commitID+1, log) - -} - -func (r *Replication) Clear() error { - return r.ClearWithCommitID(0) -} - -func (r *Replication) ClearWithCommitID(id uint64) error { - r.m.Lock() - defer r.m.Unlock() - - if err := r.s.Clear(); err != nil { - return err - } - - return r.updateCommitID(id, true) -} - -func (r *Replication) run() { - defer r.wg.Done() - - syncTc := time.NewTicker(1 * time.Second) - purgeTc := time.NewTicker(1 * time.Hour) - - for { - select { - case <-purgeTc.C: - n := (r.cfg.Replication.ExpiredLogDays * 24 * 3600) - r.m.Lock() - err := r.s.PurgeExpired(int64(n)) - r.m.Unlock() - if err != nil { - log.Errorf("purge expired log error %s", err.Error()) - } - case <-syncTc.C: - if r.cfg.Replication.SyncLog == 1 { - r.m.Lock() - err := r.s.Sync() - r.m.Unlock() - if err != nil { - log.Errorf("sync store error %s", err.Error()) - } - } - if r.cfg.Replication.SyncLog != 2 { - //we will sync commit id every 1 second - r.m.Lock() - err := r.updateCommitID(r.commitID, true) - r.m.Unlock() - - if err != nil { - log.Errorf("sync commitid error %s", err.Error()) - } - } - case <-r.quit: - syncTc.Stop() - purgeTc.Stop() - return - } - } -} diff --git a/vendor/github.com/siddontang/ledisdb/rpl/store.go b/vendor/github.com/siddontang/ledisdb/rpl/store.go deleted file mode 100644 index 9f985ec6be0f..000000000000 --- a/vendor/github.com/siddontang/ledisdb/rpl/store.go +++ /dev/null @@ -1,36 +0,0 @@ -package rpl - -import ( - "errors" -) - -const ( - InvalidLogID uint64 = 0 -) - -var ( 
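	// Semantics of the sentinel errors below:
	//  - ErrLogNotFound: the requested ID is outside [FirstID, LastID].
	//  - ErrStoreLogID: an appended log's ID is not exactly LastID+1.
	//  - ErrNoBehindLog: every stored log is already committed.
	//  - ErrCommitIDBehind: appends are refused while uncommitted
	//    entries remain (the commit ID lags the last log ID).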
- ErrLogNotFound = errors.New("log not found") - ErrStoreLogID = errors.New("log id is less") - ErrNoBehindLog = errors.New("no behind commit log") - ErrCommitIDBehind = errors.New("commit id is behind last log id") -) - -type LogStore interface { - GetLog(id uint64, log *Log) error - - FirstID() (uint64, error) - LastID() (uint64, error) - - // if log id is less than current last id, return error - StoreLog(log *Log) error - - // Delete logs before n seconds - PurgeExpired(n int64) error - - Sync() error - - // Clear all logs - Clear() error - - Close() error -} diff --git a/vendor/github.com/siddontang/ledisdb/store/db.go b/vendor/github.com/siddontang/ledisdb/store/db.go deleted file mode 100644 index bfc0418298b8..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/db.go +++ /dev/null @@ -1,169 +0,0 @@ -package store - -import ( - "sync" - "time" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" -) - -type DB struct { - db driver.IDB - name string - - st *Stat - - cfg *config.Config - - lastCommit time.Time - - m sync.Mutex -} - -func (db *DB) Close() error { - return db.db.Close() -} - -func (db *DB) String() string { - return db.name -} - -func (db *DB) NewIterator() *Iterator { - db.st.IterNum.Add(1) - - it := new(Iterator) - it.it = db.db.NewIterator() - it.st = db.st - - return it -} - -func (db *DB) Get(key []byte) ([]byte, error) { - t := time.Now() - v, err := db.db.Get(key) - db.st.statGet(v, err) - db.st.GetTotalTime.Add(time.Now().Sub(t)) - return v, err -} - -func (db *DB) Put(key []byte, value []byte) error { - db.st.PutNum.Add(1) - - if db.needSyncCommit() { - return db.db.SyncPut(key, value) - - } else { - return db.db.Put(key, value) - - } -} - -func (db *DB) Delete(key []byte) error { - db.st.DeleteNum.Add(1) - - if db.needSyncCommit() { - return db.db.SyncDelete(key) - } else { - return db.db.Delete(key) - } -} - -func (db *DB) NewWriteBatch() *WriteBatch { - db.st.BatchNum.Add(1) - wb := new(WriteBatch) - wb.wb = db.db.NewWriteBatch() - wb.st = db.st - wb.db = db - return wb -} - -func (db *DB) NewSnapshot() (*Snapshot, error) { - db.st.SnapshotNum.Add(1) - - var err error - s := &Snapshot{} - if s.ISnapshot, err = db.db.NewSnapshot(); err != nil { - return nil, err - } - s.st = db.st - - return s, nil -} - -func (db *DB) Compact() error { - db.st.CompactNum.Add(1) - - t := time.Now() - err := db.db.Compact() - - db.st.CompactTotalTime.Add(time.Now().Sub(t)) - - return err -} - -func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { - return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) -} - -func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { - return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) -} - -//count < 0, unlimit. -// -//offset must >= 0, if < 0, will get nothing. -func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { - return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) -} - -//count < 0, unlimit. -// -//offset must >= 0, if < 0, will get nothing. 
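//
//From a caller's point of view (an illustrative sketch; "store" is this
//package's import name, and keys "a".."e" are assumed to exist):
//
//	it := db.RangeLimitIterator([]byte("a"), []byte("e"), store.RangeClose, 1, 2)
//	for ; it.Valid(); it.Next() {
//		fmt.Printf("%s\n", it.Key()) // offset 1 skips "a"; count 2 yields "b", "c"
//	}
//	it.Close()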
-func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { - return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) -} - -func (db *DB) Stat() *Stat { - return db.st -} - -func (db *DB) needSyncCommit() bool { - if db.cfg.DBSyncCommit == 0 { - return false - } else if db.cfg.DBSyncCommit == 2 { - return true - } else { - n := time.Now() - need := false - db.m.Lock() - - if n.Sub(db.lastCommit) > time.Second { - need = true - } - db.lastCommit = n - - db.m.Unlock() - return need - } - -} - -func (db *DB) GetSlice(key []byte) (Slice, error) { - if d, ok := db.db.(driver.ISliceGeter); ok { - t := time.Now() - v, err := d.GetSlice(key) - db.st.statGet(v, err) - db.st.GetTotalTime.Add(time.Now().Sub(t)) - return v, err - } else { - v, err := db.Get(key) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } else { - return driver.GoSlice(v), nil - } - } -} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/driver.go b/vendor/github.com/siddontang/ledisdb/store/driver/driver.go deleted file mode 100644 index afa549cd258d..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/driver/driver.go +++ /dev/null @@ -1,57 +0,0 @@ -package driver - -type IDB interface { - Close() error - - Get(key []byte) ([]byte, error) - - Put(key []byte, value []byte) error - Delete(key []byte) error - - SyncPut(key []byte, value []byte) error - SyncDelete(key []byte) error - - NewIterator() IIterator - - NewWriteBatch() IWriteBatch - - NewSnapshot() (ISnapshot, error) - - Compact() error -} - -type ISnapshot interface { - Get(key []byte) ([]byte, error) - NewIterator() IIterator - Close() -} - -type IIterator interface { - Close() error - - First() - Last() - Seek(key []byte) - - Next() - Prev() - - Valid() bool - - Key() []byte - Value() []byte -} - -type IWriteBatch interface { - Put(key []byte, value []byte) - Delete(key []byte) - Commit() error - SyncCommit() error - Rollback() error - Data() []byte - Close() -} - -type ISliceGeter interface { - GetSlice(key []byte) (ISlice, error) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/slice.go b/vendor/github.com/siddontang/ledisdb/store/driver/slice.go deleted file mode 100644 index d0c80e0b8fcf..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/driver/slice.go +++ /dev/null @@ -1,21 +0,0 @@ -package driver - -type ISlice interface { - Data() []byte - Size() int - Free() -} - -type GoSlice []byte - -func (s GoSlice) Data() []byte { - return []byte(s) -} - -func (s GoSlice) Size() int { - return len(s) -} - -func (s GoSlice) Free() { - -} diff --git a/vendor/github.com/siddontang/ledisdb/store/driver/store.go b/vendor/github.com/siddontang/ledisdb/store/driver/store.go deleted file mode 100644 index fbaebfc98b05..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/driver/store.go +++ /dev/null @@ -1,46 +0,0 @@ -package driver - -import ( - "fmt" - - "github.com/siddontang/ledisdb/config" -) - -type Store interface { - String() string - Open(path string, cfg *config.Config) (IDB, error) - Repair(path string, cfg *config.Config) error -} - -var dbs = map[string]Store{} - -func Register(s Store) { - name := s.String() - if _, ok := dbs[name]; ok { - panic(fmt.Errorf("store %s is registered", s)) - } - - dbs[name] = s -} - -func ListStores() []string { - s := []string{} - for k := range dbs { - s = append(s, k) - } - - return s -} - -func GetStore(cfg *config.Config) (Store, 
error) { - if len(cfg.DBName) == 0 { - cfg.DBName = config.DefaultDBName - } - - s, ok := dbs[cfg.DBName] - if !ok { - return nil, fmt.Errorf("store %s is not registered", cfg.DBName) - } - - return s, nil -} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go deleted file mode 100644 index 2032279a2a8e..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go +++ /dev/null @@ -1,39 +0,0 @@ -package goleveldb - -import ( - "github.com/syndtr/goleveldb/leveldb" -) - -type WriteBatch struct { - db *DB - wbatch *leveldb.Batch -} - -func (w *WriteBatch) Put(key, value []byte) { - w.wbatch.Put(key, value) -} - -func (w *WriteBatch) Delete(key []byte) { - w.wbatch.Delete(key) -} - -func (w *WriteBatch) Commit() error { - return w.db.db.Write(w.wbatch, nil) -} - -func (w *WriteBatch) SyncCommit() error { - return w.db.db.Write(w.wbatch, w.db.syncOpts) -} - -func (w *WriteBatch) Rollback() error { - w.wbatch.Reset() - return nil -} - -func (w *WriteBatch) Close() { - w.wbatch.Reset() -} - -func (w *WriteBatch) Data() []byte { - return w.wbatch.Dump() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go deleted file mode 100644 index 2fffa7c82bb4..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go +++ /dev/null @@ -1,4 +0,0 @@ -package goleveldb - -const DBName = "goleveldb" -const MemDBName = "memory" diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go deleted file mode 100644 index 1afc32fb75b5..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go +++ /dev/null @@ -1,204 +0,0 @@ -package goleveldb - -import ( - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/cache" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" - - "os" -) - -const defaultFilterBits int = 10 - -type Store struct { -} - -func (s Store) String() string { - return DBName -} - -type MemStore struct { -} - -func (s MemStore) String() string { - return MemDBName -} - -type DB struct { - path string - - cfg *config.LevelDBConfig - - db *leveldb.DB - - opts *opt.Options - - iteratorOpts *opt.ReadOptions - - syncOpts *opt.WriteOptions - - cache cache.Cache - - filter filter.Filter -} - -func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - db := new(DB) - db.path = path - db.cfg = &cfg.LevelDB - - db.initOpts() - - var err error - db.db, err = leveldb.OpenFile(db.path, db.opts) - - if err != nil { - return nil, err - } - - return db, nil -} - -func (s Store) Repair(path string, cfg *config.Config) error { - db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB)) - if err != nil { - return err - } - - db.Close() - return nil -} - -func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) { - db := new(DB) - db.path = path - db.cfg = &cfg.LevelDB - - db.initOpts() - - var err error - db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts) - if err != nil { - return nil, err - } - - return db, nil -} - -func (s MemStore) Repair(path 
string, cfg *config.Config) error { - return nil -} - -func (db *DB) initOpts() { - db.opts = newOptions(db.cfg) - - db.iteratorOpts = &opt.ReadOptions{} - db.iteratorOpts.DontFillCache = true - - db.syncOpts = &opt.WriteOptions{} - db.syncOpts.Sync = true -} - -func newOptions(cfg *config.LevelDBConfig) *opt.Options { - opts := &opt.Options{} - opts.ErrorIfMissing = false - - opts.BlockCacheCapacity = cfg.CacheSize - - //we must use bloomfilter - opts.Filter = filter.NewBloomFilter(defaultFilterBits) - - if !cfg.Compression { - opts.Compression = opt.NoCompression - } else { - opts.Compression = opt.SnappyCompression - } - - opts.BlockSize = cfg.BlockSize - opts.WriteBuffer = cfg.WriteBufferSize - opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles - - //here we use default value, later add config support - opts.CompactionTableSize = 32 * 1024 * 1024 - opts.WriteL0SlowdownTrigger = 16 - opts.WriteL0PauseTrigger = 64 - - return opts -} - -func (db *DB) Close() error { - return db.db.Close() -} - -func (db *DB) Put(key, value []byte) error { - return db.db.Put(key, value, nil) -} - -func (db *DB) Get(key []byte) ([]byte, error) { - v, err := db.db.Get(key, nil) - if err == leveldb.ErrNotFound { - return nil, nil - } - return v, nil -} - -func (db *DB) Delete(key []byte) error { - return db.db.Delete(key, nil) -} - -func (db *DB) SyncPut(key []byte, value []byte) error { - return db.db.Put(key, value, db.syncOpts) -} - -func (db *DB) SyncDelete(key []byte) error { - return db.db.Delete(key, db.syncOpts) -} - -func (db *DB) NewWriteBatch() driver.IWriteBatch { - wb := &WriteBatch{ - db: db, - wbatch: new(leveldb.Batch), - } - return wb -} - -func (db *DB) NewIterator() driver.IIterator { - it := &Iterator{ - db.db.NewIterator(nil, db.iteratorOpts), - } - - return it -} - -func (db *DB) NewSnapshot() (driver.ISnapshot, error) { - snapshot, err := db.db.GetSnapshot() - if err != nil { - return nil, err - } - - s := &Snapshot{ - db: db, - snp: snapshot, - } - - return s, nil -} - -func (db *DB) Compact() error { - return db.db.CompactRange(util.Range{nil, nil}) -} - -func init() { - driver.Register(Store{}) - driver.Register(MemStore{}) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go deleted file mode 100644 index c1fd8b5573bb..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go +++ /dev/null @@ -1,49 +0,0 @@ -package goleveldb - -import ( - "github.com/syndtr/goleveldb/leveldb/iterator" -) - -type Iterator struct { - it iterator.Iterator -} - -func (it *Iterator) Key() []byte { - return it.it.Key() -} - -func (it *Iterator) Value() []byte { - return it.it.Value() -} - -func (it *Iterator) Close() error { - if it.it != nil { - it.it.Release() - it.it = nil - } - return nil -} - -func (it *Iterator) Valid() bool { - return it.it.Valid() -} - -func (it *Iterator) Next() { - it.it.Next() -} - -func (it *Iterator) Prev() { - it.it.Prev() -} - -func (it *Iterator) First() { - it.it.First() -} - -func (it *Iterator) Last() { - it.it.Last() -} - -func (it *Iterator) Seek(key []byte) { - it.it.Seek(key) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go deleted file mode 100644 index c615579bb89d..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go +++ /dev/null @@ -1,26 +0,0 @@ -package goleveldb - -import ( - 
"github.com/siddontang/ledisdb/store/driver" - "github.com/syndtr/goleveldb/leveldb" -) - -type Snapshot struct { - db *DB - snp *leveldb.Snapshot -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - return s.snp.Get(key, s.db.iteratorOpts) -} - -func (s *Snapshot) NewIterator() driver.IIterator { - it := &Iterator{ - s.snp.NewIterator(nil, s.db.iteratorOpts), - } - return it -} - -func (s *Snapshot) Close() { - s.snp.Release() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/iterator.go b/vendor/github.com/siddontang/ledisdb/store/iterator.go deleted file mode 100644 index 12a03b6cd402..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/iterator.go +++ /dev/null @@ -1,334 +0,0 @@ -package store - -import ( - "bytes" - - "github.com/siddontang/ledisdb/store/driver" -) - -const ( - IteratorForward uint8 = 0 - IteratorBackward uint8 = 1 -) - -const ( - RangeClose uint8 = 0x00 - RangeLOpen uint8 = 0x01 - RangeROpen uint8 = 0x10 - RangeOpen uint8 = 0x11 -) - -// min must less or equal than max -// -// range type: -// -// close: [min, max] -// open: (min, max) -// lopen: (min, max] -// ropen: [min, max) -// -type Range struct { - Min []byte - Max []byte - - Type uint8 -} - -type Limit struct { - Offset int - Count int -} - -type Iterator struct { - it driver.IIterator - st *Stat -} - -// Returns a copy of key. -func (it *Iterator) Key() []byte { - k := it.it.Key() - if k == nil { - return nil - } - - return append([]byte{}, k...) -} - -// Returns a copy of value. -func (it *Iterator) Value() []byte { - v := it.it.Value() - if v == nil { - return nil - } - - return append([]byte{}, v...) -} - -// Returns a reference of key. -// you must be careful that it will be changed after next iterate. -func (it *Iterator) RawKey() []byte { - return it.it.Key() -} - -// Returns a reference of value. -// you must be careful that it will be changed after next iterate. -func (it *Iterator) RawValue() []byte { - return it.it.Value() -} - -// Copy key to b, if b len is small or nil, returns a new one. -func (it *Iterator) BufKey(b []byte) []byte { - k := it.RawKey() - if k == nil { - return nil - } - if b == nil { - b = []byte{} - } - - b = b[0:0] - return append(b, k...) -} - -// Copy value to b, if b len is small or nil, returns a new one. -func (it *Iterator) BufValue(b []byte) []byte { - v := it.RawValue() - if v == nil { - return nil - } - - if b == nil { - b = []byte{} - } - - b = b[0:0] - return append(b, v...) -} - -func (it *Iterator) Close() { - if it.it != nil { - it.st.IterCloseNum.Add(1) - it.it.Close() - it.it = nil - } -} - -func (it *Iterator) Valid() bool { - return it.it.Valid() -} - -func (it *Iterator) Next() { - it.st.IterSeekNum.Add(1) - it.it.Next() -} - -func (it *Iterator) Prev() { - it.st.IterSeekNum.Add(1) - it.it.Prev() -} - -func (it *Iterator) SeekToFirst() { - it.st.IterSeekNum.Add(1) - it.it.First() -} - -func (it *Iterator) SeekToLast() { - it.st.IterSeekNum.Add(1) - it.it.Last() -} - -func (it *Iterator) Seek(key []byte) { - it.st.IterSeekNum.Add(1) - it.it.Seek(key) -} - -// Finds by key, if not found, nil returns. -func (it *Iterator) Find(key []byte) []byte { - it.Seek(key) - if it.Valid() { - k := it.RawKey() - if k == nil { - return nil - } else if bytes.Equal(k, key) { - return it.Value() - } - } - - return nil -} - -// Finds by key, if not found, nil returns, else a reference of value returns. -// you must be careful that it will be changed after next iterate. 
-func (it *Iterator) RawFind(key []byte) []byte { - it.Seek(key) - if it.Valid() { - k := it.RawKey() - if k == nil { - return nil - } else if bytes.Equal(k, key) { - return it.RawValue() - } - } - - return nil -} - -type RangeLimitIterator struct { - it *Iterator - - r *Range - l *Limit - - step int - - //0 for IteratorForward, 1 for IteratorBackward - direction uint8 -} - -func (it *RangeLimitIterator) Key() []byte { - return it.it.Key() -} - -func (it *RangeLimitIterator) Value() []byte { - return it.it.Value() -} - -func (it *RangeLimitIterator) RawKey() []byte { - return it.it.RawKey() -} - -func (it *RangeLimitIterator) RawValue() []byte { - return it.it.RawValue() -} - -func (it *RangeLimitIterator) BufKey(b []byte) []byte { - return it.it.BufKey(b) -} - -func (it *RangeLimitIterator) BufValue(b []byte) []byte { - return it.it.BufValue(b) -} - -func (it *RangeLimitIterator) Valid() bool { - if it.l.Offset < 0 { - return false - } else if !it.it.Valid() { - return false - } else if it.l.Count >= 0 && it.step >= it.l.Count { - return false - } - - if it.direction == IteratorForward { - if it.r.Max != nil { - r := bytes.Compare(it.it.RawKey(), it.r.Max) - if it.r.Type&RangeROpen > 0 { - return !(r >= 0) - } else { - return !(r > 0) - } - } - } else { - if it.r.Min != nil { - r := bytes.Compare(it.it.RawKey(), it.r.Min) - if it.r.Type&RangeLOpen > 0 { - return !(r <= 0) - } else { - return !(r < 0) - } - } - } - - return true -} - -func (it *RangeLimitIterator) Next() { - it.step++ - - if it.direction == IteratorForward { - it.it.Next() - } else { - it.it.Prev() - } -} - -func (it *RangeLimitIterator) Close() { - it.it.Close() -} - -func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { - return rangeLimitIterator(i, r, l, IteratorForward) -} - -func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { - return rangeLimitIterator(i, r, l, IteratorBackward) -} - -func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { - return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward) -} - -func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { - return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward) -} - -func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator { - it := new(RangeLimitIterator) - - it.it = i - - it.r = r - it.l = l - it.direction = direction - - it.step = 0 - - if l.Offset < 0 { - return it - } - - if direction == IteratorForward { - if r.Min == nil { - it.it.SeekToFirst() - } else { - it.it.Seek(r.Min) - - if r.Type&RangeLOpen > 0 { - if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) { - it.it.Next() - } - } - } - } else { - if r.Max == nil { - it.it.SeekToLast() - } else { - it.it.Seek(r.Max) - - if !it.it.Valid() { - it.it.SeekToLast() - } else { - if !bytes.Equal(it.it.RawKey(), r.Max) { - it.it.Prev() - } - } - - if r.Type&RangeROpen > 0 { - if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) { - it.it.Prev() - } - } - } - } - - for i := 0; i < l.Offset; i++ { - if it.it.Valid() { - if it.direction == IteratorForward { - it.it.Next() - } else { - it.it.Prev() - } - } - } - - return it -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go deleted file mode 100644 index cc1b02448896..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: 
-lleveldb -// #include "leveldb/c.h" -// #include "leveldb_ext.h" -import "C" - -import ( - "unsafe" - - "github.com/syndtr/goleveldb/leveldb" -) - -type WriteBatch struct { - db *DB - wbatch *C.leveldb_writebatch_t -} - -func newWriteBatch(db *DB) *WriteBatch { - w := new(WriteBatch) - w.db = db - w.wbatch = C.leveldb_writebatch_create() - - return w -} - -func (w *WriteBatch) Close() { - if w.wbatch != nil { - C.leveldb_writebatch_destroy(w.wbatch) - w.wbatch = nil - } -} - -func (w *WriteBatch) Put(key, value []byte) { - var k, v *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - if len(value) != 0 { - v = (*C.char)(unsafe.Pointer(&value[0])) - } - - lenk := len(key) - lenv := len(value) - - C.leveldb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv)) -} - -func (w *WriteBatch) Delete(key []byte) { - C.leveldb_writebatch_delete(w.wbatch, - (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) -} - -func (w *WriteBatch) Commit() error { - return w.commit(w.db.writeOpts) -} - -func (w *WriteBatch) SyncCommit() error { - return w.commit(w.db.syncOpts) -} - -func (w *WriteBatch) Rollback() error { - C.leveldb_writebatch_clear(w.wbatch) - - return nil -} - -func (w *WriteBatch) commit(wb *WriteOptions) error { - var errStr *C.char - C.leveldb_write(w.db.db, wb.Opt, w.wbatch, &errStr) - if errStr != nil { - return saveError(errStr) - } - return nil -} - -//export leveldb_writebatch_iterate_put -func leveldb_writebatch_iterate_put(p unsafe.Pointer, k *C.char, klen C.size_t, v *C.char, vlen C.size_t) { - b := (*leveldb.Batch)(p) - key := slice(unsafe.Pointer(k), int(klen)) - value := slice(unsafe.Pointer(v), int(vlen)) - b.Put(key, value) -} - -//export leveldb_writebatch_iterate_delete -func leveldb_writebatch_iterate_delete(p unsafe.Pointer, k *C.char, klen C.size_t) { - b := (*leveldb.Batch)(p) - key := slice(unsafe.Pointer(k), int(klen)) - b.Delete(key) -} - -func (w *WriteBatch) Data() []byte { - gbatch := leveldb.Batch{} - C.leveldb_writebatch_iterate_ext(w.wbatch, - unsafe.Pointer(&gbatch)) - return gbatch.Dump() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go deleted file mode 100644 index e5587cbf89e2..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include -// #include "leveldb/c.h" -import "C" - -type Cache struct { - Cache *C.leveldb_cache_t -} - -func NewLRUCache(capacity int) *Cache { - return &Cache{C.leveldb_cache_create_lru(C.size_t(capacity))} -} - -func (c *Cache) Close() { - C.leveldb_cache_destroy(c.Cache) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go deleted file mode 100644 index df5b3c7a83b5..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/const.go +++ /dev/null @@ -1,3 +0,0 @@ -package leveldb - -const DBName = "leveldb" diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go deleted file mode 100644 index 7f1ee676ec39..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/db.go +++ /dev/null @@ -1,314 +0,0 @@ -// +build leveldb - -// Package leveldb is a wrapper for c++ leveldb -package leveldb - -/* -#cgo LDFLAGS: -lleveldb -#include -#include "leveldb_ext.h" -*/ -import "C" - -import ( - "os" - "runtime" - 
"unsafe" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" -) - -const defaultFilterBits int = 10 - -type Store struct { -} - -func (s Store) String() string { - return DBName -} - -func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - db := new(DB) - db.path = path - db.cfg = &cfg.LevelDB - - if err := db.open(); err != nil { - return nil, err - } - - return db, nil -} - -func (s Store) Repair(path string, cfg *config.Config) error { - db := new(DB) - db.cfg = &cfg.LevelDB - db.path = path - - err := db.open() - defer db.Close() - - //open ok, do not need repair - if err == nil { - return nil - } - - var errStr *C.char - ldbname := C.CString(path) - defer C.leveldb_free(unsafe.Pointer(ldbname)) - - C.leveldb_repair_db(db.opts.Opt, ldbname, &errStr) - if errStr != nil { - return saveError(errStr) - } - return nil -} - -type DB struct { - path string - - cfg *config.LevelDBConfig - - db *C.leveldb_t - - opts *Options - - //for default read and write options - readOpts *ReadOptions - writeOpts *WriteOptions - iteratorOpts *ReadOptions - - syncOpts *WriteOptions - - cache *Cache - - filter *FilterPolicy -} - -func (db *DB) open() error { - db.initOptions(db.cfg) - - var errStr *C.char - ldbname := C.CString(db.path) - defer C.leveldb_free(unsafe.Pointer(ldbname)) - - db.db = C.leveldb_open(db.opts.Opt, ldbname, &errStr) - if errStr != nil { - db.db = nil - return saveError(errStr) - } - return nil -} - -func (db *DB) initOptions(cfg *config.LevelDBConfig) { - opts := NewOptions() - - opts.SetCreateIfMissing(true) - - db.cache = NewLRUCache(cfg.CacheSize) - opts.SetCache(db.cache) - - //we must use bloomfilter - db.filter = NewBloomFilter(defaultFilterBits) - opts.SetFilterPolicy(db.filter) - - if !cfg.Compression { - opts.SetCompression(NoCompression) - } else { - opts.SetCompression(SnappyCompression) - } - - opts.SetBlockSize(cfg.BlockSize) - - opts.SetWriteBufferSize(cfg.WriteBufferSize) - - opts.SetMaxOpenFiles(cfg.MaxOpenFiles) - - opts.SetMaxFileSize(cfg.MaxFileSize) - - db.opts = opts - - db.readOpts = NewReadOptions() - db.writeOpts = NewWriteOptions() - - db.syncOpts = NewWriteOptions() - db.syncOpts.SetSync(true) - - db.iteratorOpts = NewReadOptions() - db.iteratorOpts.SetFillCache(false) -} - -func (db *DB) Close() error { - if db.db != nil { - C.leveldb_close(db.db) - db.db = nil - } - - db.opts.Close() - - if db.cache != nil { - db.cache.Close() - } - - if db.filter != nil { - db.filter.Close() - } - - db.readOpts.Close() - db.writeOpts.Close() - db.iteratorOpts.Close() - - return nil -} - -func (db *DB) Put(key, value []byte) error { - return db.put(db.writeOpts, key, value) -} - -func (db *DB) Get(key []byte) ([]byte, error) { - return db.get(db.readOpts, key) -} - -func (db *DB) Delete(key []byte) error { - return db.delete(db.writeOpts, key) -} - -func (db *DB) SyncPut(key []byte, value []byte) error { - return db.put(db.syncOpts, key, value) -} - -func (db *DB) SyncDelete(key []byte) error { - return db.delete(db.syncOpts, key) -} - -func (db *DB) NewWriteBatch() driver.IWriteBatch { - wb := newWriteBatch(db) - - runtime.SetFinalizer(wb, func(w *WriteBatch) { - w.Close() - }) - - return wb -} - -func (db *DB) NewIterator() driver.IIterator { - it := new(Iterator) - - it.it = C.leveldb_create_iterator(db.db, db.iteratorOpts.Opt) - - return it -} - -func (db *DB) NewSnapshot() (driver.ISnapshot, error) { - snap := &Snapshot{ - db: db, - snap: 
C.leveldb_create_snapshot(db.db), - readOpts: NewReadOptions(), - iteratorOpts: NewReadOptions(), - } - snap.readOpts.SetSnapshot(snap) - snap.iteratorOpts.SetSnapshot(snap) - snap.iteratorOpts.SetFillCache(false) - - return snap, nil -} - -func (db *DB) put(wo *WriteOptions, key, value []byte) error { - var errStr *C.char - var k, v *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - if len(value) != 0 { - v = (*C.char)(unsafe.Pointer(&value[0])) - } - - lenk := len(key) - lenv := len(value) - C.leveldb_put( - db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr) - - if errStr != nil { - return saveError(errStr) - } - return nil -} - -func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) { - var errStr *C.char - var vallen C.size_t - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - value := C.leveldb_get( - db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) - - if errStr != nil { - return nil, saveError(errStr) - } - - if value == nil { - return nil, nil - } - - defer C.leveldb_free(unsafe.Pointer(value)) - - return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil -} - -func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) { - var errStr *C.char - var vallen C.size_t - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - value := C.leveldb_get( - db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) - - if errStr != nil { - return nil, saveError(errStr) - } - - if value == nil { - return nil, nil - } - - return NewCSlice(unsafe.Pointer(value), int(vallen)), nil -} - -func (db *DB) delete(wo *WriteOptions, key []byte) error { - var errStr *C.char - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - C.leveldb_delete( - db.db, wo.Opt, k, C.size_t(len(key)), &errStr) - - if errStr != nil { - return saveError(errStr) - } - return nil -} - -func (db *DB) Compact() error { - C.leveldb_compact_range(db.db, nil, 0, nil, 0) - return nil -} - -func (db *DB) GetSlice(key []byte) (driver.ISlice, error) { - return db.getSlice(db.readOpts, key) -} - -func init() { - driver.Register(Store{}) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go deleted file mode 100644 index 640139fb8b8e..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include -// #include "leveldb/c.h" -import "C" - -type FilterPolicy struct { - Policy *C.leveldb_filterpolicy_t -} - -func NewBloomFilter(bitsPerKey int) *FilterPolicy { - policy := C.leveldb_filterpolicy_create_bloom(C.int(bitsPerKey)) - return &FilterPolicy{policy} -} - -func (fp *FilterPolicy) Close() { - C.leveldb_filterpolicy_destroy(fp.Policy) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go deleted file mode 100644 index 49cfd7db18ea..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include -// #include "leveldb/c.h" -// #include "leveldb_ext.h" -import "C" - -import ( - "unsafe" -) - -type Iterator struct { - it *C.leveldb_iterator_t - isValid C.uchar -} - -func (it *Iterator) Key() []byte { - var klen C.size_t - kdata := 
C.leveldb_iter_key(it.it, &klen) - if kdata == nil { - return nil - } - - return slice(unsafe.Pointer(kdata), int(C.int(klen))) -} - -func (it *Iterator) Value() []byte { - var vlen C.size_t - vdata := C.leveldb_iter_value(it.it, &vlen) - if vdata == nil { - return nil - } - - return slice(unsafe.Pointer(vdata), int(C.int(vlen))) -} - -func (it *Iterator) Close() error { - if it.it != nil { - C.leveldb_iter_destroy(it.it) - it.it = nil - } - return nil -} - -func (it *Iterator) Valid() bool { - return ucharToBool(it.isValid) -} - -func (it *Iterator) Next() { - it.isValid = C.leveldb_iter_next_ext(it.it) -} - -func (it *Iterator) Prev() { - it.isValid = C.leveldb_iter_prev_ext(it.it) -} - -func (it *Iterator) First() { - it.isValid = C.leveldb_iter_seek_to_first_ext(it.it) -} - -func (it *Iterator) Last() { - it.isValid = C.leveldb_iter_seek_to_last_ext(it.it) -} - -func (it *Iterator) Seek(key []byte) { - it.isValid = C.leveldb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc deleted file mode 100644 index 540b7397b01b..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc +++ /dev/null @@ -1,95 +0,0 @@ -// +build leveldb - -#include "leveldb_ext.h" - -#include -//#include - -//#include "leveldb/db.h" - -//using namespace leveldb; - -extern "C" { - -// static bool SaveError(char** errptr, const Status& s) { -// assert(errptr != NULL); -// if (s.ok()) { -// return false; -// } else if (*errptr == NULL) { -// *errptr = strdup(s.ToString().c_str()); -// } else { -// free(*errptr); -// *errptr = strdup(s.ToString().c_str()); -// } -// return true; -// } - -// void* leveldb_get_ext( -// leveldb_t* db, -// const leveldb_readoptions_t* options, -// const char* key, size_t keylen, -// char** valptr, -// size_t* vallen, -// char** errptr) { - -// std::string *tmp = new(std::string); - -// //very tricky, maybe changed with c++ leveldb upgrade -// Status s = (*(DB**)db)->Get(*(ReadOptions*)options, Slice(key, keylen), tmp); - -// if (s.ok()) { -// *valptr = (char*)tmp->data(); -// *vallen = tmp->size(); -// } else { -// delete(tmp); -// tmp = NULL; -// *valptr = NULL; -// *vallen = 0; -// if (!s.IsNotFound()) { -// SaveError(errptr, s); -// } -// } -// return tmp; -// } - -// void leveldb_get_free_ext(void* context) { -// std::string* s = (std::string*)context; - -// delete(s); -// } - - -unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t* iter) { - leveldb_iter_seek_to_first(iter); - return leveldb_iter_valid(iter); -} - -unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t* iter) { - leveldb_iter_seek_to_last(iter); - return leveldb_iter_valid(iter); -} - -unsigned char leveldb_iter_seek_ext(leveldb_iterator_t* iter, const char* k, size_t klen) { - leveldb_iter_seek(iter, k, klen); - return leveldb_iter_valid(iter); -} - -unsigned char leveldb_iter_next_ext(leveldb_iterator_t* iter) { - leveldb_iter_next(iter); - return leveldb_iter_valid(iter); -} - -unsigned char leveldb_iter_prev_ext(leveldb_iterator_t* iter) { - leveldb_iter_prev(iter); - return leveldb_iter_valid(iter); -} - -extern void leveldb_writebatch_iterate_put(void*, const char* k, size_t klen, const char* v, size_t vlen); -extern void leveldb_writebatch_iterate_delete(void*, const char* k, size_t klen); - -void leveldb_writebatch_iterate_ext(leveldb_writebatch_t* w, void *p) { - 
leveldb_writebatch_iterate(w, p, - leveldb_writebatch_iterate_put, leveldb_writebatch_iterate_delete); -} - -} \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h b/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h deleted file mode 100644 index 3eed41bdf9b0..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h +++ /dev/null @@ -1,41 +0,0 @@ -// +build leveldb - -#ifndef LEVELDB_EXT_H -#define LEVELDB_EXT_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include "leveldb/c.h" - - -// /* Returns NULL if not found. Otherwise stores the value in **valptr. -// Stores the length of the value in *vallen. -// Returns a context must be later to free*/ -// extern void* leveldb_get_ext( -// leveldb_t* db, -// const leveldb_readoptions_t* options, -// const char* key, size_t keylen, -// char** valptr, -// size_t* vallen, -// char** errptr); - -// // Free context returns by leveldb_get_ext -// extern void leveldb_get_free_ext(void* context); - - -// Below iterator functions like leveldb iterator but returns valid status for iterator -extern unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t*); -extern unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t*); -extern unsigned char leveldb_iter_seek_ext(leveldb_iterator_t*, const char* k, size_t klen); -extern unsigned char leveldb_iter_next_ext(leveldb_iterator_t*); -extern unsigned char leveldb_iter_prev_ext(leveldb_iterator_t*); - -extern void leveldb_writebatch_iterate_ext(leveldb_writebatch_t*, void* p); - -#ifdef __cplusplus -} -#endif - -#endif \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go deleted file mode 100644 index 68733bb612be..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/options.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include "leveldb/c.h" -import "C" - -type CompressionOpt int - -const ( - NoCompression = CompressionOpt(0) - SnappyCompression = CompressionOpt(1) -) - -type Options struct { - Opt *C.leveldb_options_t -} - -type ReadOptions struct { - Opt *C.leveldb_readoptions_t -} - -type WriteOptions struct { - Opt *C.leveldb_writeoptions_t -} - -func NewOptions() *Options { - opt := C.leveldb_options_create() - return &Options{opt} -} - -func NewReadOptions() *ReadOptions { - opt := C.leveldb_readoptions_create() - return &ReadOptions{opt} -} - -func NewWriteOptions() *WriteOptions { - opt := C.leveldb_writeoptions_create() - return &WriteOptions{opt} -} - -func (o *Options) Close() { - C.leveldb_options_destroy(o.Opt) -} - -func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) { - C.leveldb_options_set_comparator(o.Opt, cmp) -} - -func (o *Options) SetErrorIfExists(error_if_exists bool) { - eie := boolToUchar(error_if_exists) - C.leveldb_options_set_error_if_exists(o.Opt, eie) -} - -func (o *Options) SetCache(cache *Cache) { - C.leveldb_options_set_cache(o.Opt, cache.Cache) -} - -func (o *Options) SetWriteBufferSize(s int) { - C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s)) -} - -func (o *Options) SetParanoidChecks(pc bool) { - C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc)) -} - -func (o *Options) SetMaxOpenFiles(n int) { - C.leveldb_options_set_max_open_files(o.Opt, C.int(n)) -} - -func (o *Options) SetMaxFileSize(n int) { - C.leveldb_options_set_max_file_size(o.Opt, 
C.size_t(n)) -} - -func (o *Options) SetBlockSize(s int) { - C.leveldb_options_set_block_size(o.Opt, C.size_t(s)) -} - -func (o *Options) SetBlockRestartInterval(n int) { - C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n)) -} - -func (o *Options) SetCompression(t CompressionOpt) { - C.leveldb_options_set_compression(o.Opt, C.int(t)) -} - -func (o *Options) SetCreateIfMissing(b bool) { - C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b)) -} - -func (o *Options) SetFilterPolicy(fp *FilterPolicy) { - var policy *C.leveldb_filterpolicy_t - if fp != nil { - policy = fp.Policy - } - C.leveldb_options_set_filter_policy(o.Opt, policy) -} - -func (ro *ReadOptions) Close() { - C.leveldb_readoptions_destroy(ro.Opt) -} - -func (ro *ReadOptions) SetVerifyChecksums(b bool) { - C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b)) -} - -func (ro *ReadOptions) SetFillCache(b bool) { - C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b)) -} - -func (ro *ReadOptions) SetSnapshot(snap *Snapshot) { - var s *C.leveldb_snapshot_t - if snap != nil { - s = snap.snap - } - C.leveldb_readoptions_set_snapshot(ro.Opt, s) -} - -func (wo *WriteOptions) Close() { - C.leveldb_writeoptions_destroy(wo.Opt) -} - -func (wo *WriteOptions) SetSync(b bool) { - C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b)) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go deleted file mode 100644 index 83ebf55c0369..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include "leveldb/c.h" -import "C" - -import ( - "reflect" - "unsafe" -) - -type CSlice struct { - data unsafe.Pointer - size int -} - -func NewCSlice(p unsafe.Pointer, n int) *CSlice { - return &CSlice{p, n} -} - -func (s *CSlice) Data() []byte { - var value []byte - - sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) - sH.Cap = int(s.size) - sH.Len = int(s.size) - sH.Data = uintptr(s.data) - - return value -} - -func (s *CSlice) Size() int { - return int(s.size) -} - -func (s *CSlice) Free() { - C.leveldb_free(s.data) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go deleted file mode 100644 index bdc8d514da8f..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build leveldb - -package leveldb - -// #cgo LDFLAGS: -lleveldb -// #include "leveldb/c.h" -import "C" - -import ( - "github.com/siddontang/ledisdb/store/driver" -) - -type Snapshot struct { - db *DB - snap *C.leveldb_snapshot_t - readOpts *ReadOptions - iteratorOpts *ReadOptions -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - return s.db.get(s.readOpts, key) -} - -func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) { - return s.db.getSlice(s.readOpts, key) -} - -func (s *Snapshot) NewIterator() driver.IIterator { - it := new(Iterator) - it.it = C.leveldb_create_iterator(s.db.db, s.db.iteratorOpts.Opt) - return it - -} - -func (s *Snapshot) Close() { - C.leveldb_release_snapshot(s.db.db, s.snap) - s.iteratorOpts.Close() - s.readOpts.Close() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go b/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go deleted file mode 100644 index 6efe33b661d6..000000000000 --- 
a/vendor/github.com/siddontang/ledisdb/store/leveldb/util.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build leveldb - -package leveldb - -// #include "leveldb/c.h" -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -func boolToUchar(b bool) C.uchar { - uc := C.uchar(0) - if b { - uc = C.uchar(1) - } - return uc -} - -func ucharToBool(uc C.uchar) bool { - if uc == C.uchar(0) { - return false - } - return true -} - -func saveError(errStr *C.char) error { - if errStr != nil { - gs := C.GoString(errStr) - C.leveldb_free(unsafe.Pointer(errStr)) - return fmt.Errorf(gs) - } - return nil -} - -func slice(p unsafe.Pointer, n int) []byte { - var b []byte - pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - pbyte.Data = uintptr(p) - pbyte.Len = n - pbyte.Cap = n - return b -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go deleted file mode 100644 index bb727e7015ef..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include "rocksdb/c.h" -// #include "rocksdb_ext.h" -import "C" - -import ( - "unsafe" -) - -type WriteBatch struct { - db *DB - wbatch *C.rocksdb_writebatch_t - commitOk bool -} - -func (w *WriteBatch) Close() { - if w.wbatch != nil { - C.rocksdb_writebatch_destroy(w.wbatch) - w.wbatch = nil - } -} - -func (w *WriteBatch) Put(key, value []byte) { - w.commitOk = false - - var k, v *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - if len(value) != 0 { - v = (*C.char)(unsafe.Pointer(&value[0])) - } - - lenk := len(key) - lenv := len(value) - - C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv)) -} - -func (w *WriteBatch) Delete(key []byte) { - w.commitOk = false - - C.rocksdb_writebatch_delete(w.wbatch, - (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) -} - -func (w *WriteBatch) Commit() error { - return w.commit(w.db.writeOpts) -} - -func (w *WriteBatch) SyncCommit() error { - return w.commit(w.db.syncOpts) -} - -func (w *WriteBatch) Rollback() error { - if !w.commitOk { - C.rocksdb_writebatch_clear(w.wbatch) - } - return nil -} - -func (w *WriteBatch) commit(wb *WriteOptions) error { - w.commitOk = true - - var errStr *C.char - C.rocksdb_write_ext(w.db.db, wb.Opt, w.wbatch, &errStr) - if errStr != nil { - w.commitOk = false - return saveError(errStr) - } - return nil -} - -func (w *WriteBatch) Data() []byte { - var vallen C.size_t - value := C.rocksdb_writebatch_data(w.wbatch, &vallen) - - return slice(unsafe.Pointer(value), int(vallen)) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go deleted file mode 100644 index 931998ba4af4..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include -// #include "rocksdb/c.h" -import "C" - -type Cache struct { - Cache *C.rocksdb_cache_t -} - -func NewLRUCache(capacity int) *Cache { - return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))} -} - -func (c *Cache) Close() { - C.rocksdb_cache_destroy(c.Cache) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go deleted file mode 100644 index f4155bbe2018..000000000000 --- 
a/vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go +++ /dev/null @@ -1,3 +0,0 @@ -package rocksdb - -const DBName = "rocksdb" diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go deleted file mode 100644 index d5b708043ec6..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go +++ /dev/null @@ -1,342 +0,0 @@ -// +build rocksdb - -// Package rocksdb is a wrapper for c++ rocksdb -package rocksdb - -/* -#cgo LDFLAGS: -lrocksdb -#include -#include -#include "rocksdb_ext.h" -*/ -import "C" - -import ( - "os" - "runtime" - "unsafe" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" -) - -const defaultFilterBits int = 10 - -type Store struct { -} - -func (s Store) String() string { - return DBName -} - -func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - db := new(DB) - db.path = path - db.cfg = &cfg.RocksDB - - if err := db.open(); err != nil { - return nil, err - } - - return db, nil -} - -func (s Store) Repair(path string, cfg *config.Config) error { - db := new(DB) - db.path = path - db.cfg = &cfg.RocksDB - - err := db.open() - defer db.Close() - - //open ok, do not need repair - if err == nil { - return nil - } - - var errStr *C.char - ldbname := C.CString(path) - defer C.free(unsafe.Pointer(ldbname)) - - C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr) - if errStr != nil { - return saveError(errStr) - } - return nil -} - -type DB struct { - path string - - cfg *config.RocksDBConfig - - db *C.rocksdb_t - - env *Env - - opts *Options - blockOpts *BlockBasedTableOptions - - //for default read and write options - readOpts *ReadOptions - writeOpts *WriteOptions - iteratorOpts *ReadOptions - - syncOpts *WriteOptions - - cache *Cache - - filter *FilterPolicy -} - -func (db *DB) open() error { - db.initOptions(db.cfg) - - var errStr *C.char - ldbname := C.CString(db.path) - defer C.free(unsafe.Pointer(ldbname)) - - db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr) - if errStr != nil { - db.db = nil - return saveError(errStr) - } - return nil -} - -func (db *DB) initOptions(cfg *config.RocksDBConfig) { - opts := NewOptions() - blockOpts := NewBlockBasedTableOptions() - - opts.SetCreateIfMissing(true) - - db.env = NewDefaultEnv() - db.env.SetBackgroundThreads(cfg.BackgroundThreads) - db.env.SetHighPriorityBackgroundThreads(cfg.HighPriorityBackgroundThreads) - opts.SetEnv(db.env) - - db.cache = NewLRUCache(cfg.CacheSize) - blockOpts.SetCache(db.cache) - - //we must use bloomfilter - db.filter = NewBloomFilter(defaultFilterBits) - blockOpts.SetFilterPolicy(db.filter) - blockOpts.SetBlockSize(cfg.BlockSize) - opts.SetBlockBasedTableFactory(blockOpts) - - opts.SetCompression(CompressionOpt(cfg.Compression)) - opts.SetWriteBufferSize(cfg.WriteBufferSize) - opts.SetMaxOpenFiles(cfg.MaxOpenFiles) - opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions) - opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes) - opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger) - opts.SetLevel0SlowdownWritesTrigger(cfg.Level0SlowdownWritesTrigger) - opts.SetLevel0StopWritesTrigger(cfg.Level0StopWritesTrigger) - opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase) - opts.SetTargetFileSizeMultiplier(cfg.TargetFileSizeMultiplier) - opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase) - 
opts.SetMaxBytesForLevelMultiplier(cfg.MaxBytesForLevelMultiplier) - opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge) - opts.DisableAutoCompactions(cfg.DisableAutoCompactions) - opts.EnableStatistics(cfg.EnableStatistics) - opts.UseFsync(cfg.UseFsync) - opts.SetStatsDumpPeriodSec(cfg.StatsDumpPeriodSec) - opts.SetMaxManifestFileSize(cfg.MaxManifestFileSize) - - db.opts = opts - db.blockOpts = blockOpts - - db.readOpts = NewReadOptions() - db.writeOpts = NewWriteOptions() - db.writeOpts.DisableWAL(cfg.DisableWAL) - - db.syncOpts = NewWriteOptions() - db.syncOpts.SetSync(true) - db.syncOpts.DisableWAL(cfg.DisableWAL) - - db.iteratorOpts = NewReadOptions() - db.iteratorOpts.SetFillCache(false) -} - -func (db *DB) Close() error { - if db.db != nil { - C.rocksdb_close(db.db) - db.db = nil - } - - if db.filter != nil { - db.filter.Close() - } - - if db.cache != nil { - db.cache.Close() - } - - if db.env != nil { - db.env.Close() - } - - //db.blockOpts.Close() - - db.opts.Close() - - db.readOpts.Close() - db.writeOpts.Close() - db.iteratorOpts.Close() - - return nil -} - -func (db *DB) Put(key, value []byte) error { - return db.put(db.writeOpts, key, value) -} - -func (db *DB) Get(key []byte) ([]byte, error) { - return db.get(db.readOpts, key) -} - -func (db *DB) Delete(key []byte) error { - return db.delete(db.writeOpts, key) -} - -func (db *DB) SyncPut(key []byte, value []byte) error { - return db.put(db.syncOpts, key, value) -} - -func (db *DB) SyncDelete(key []byte) error { - return db.delete(db.syncOpts, key) -} - -func (db *DB) NewWriteBatch() driver.IWriteBatch { - wb := &WriteBatch{ - db: db, - wbatch: C.rocksdb_writebatch_create(), - } - - runtime.SetFinalizer(wb, func(w *WriteBatch) { - w.Close() - }) - - return wb -} - -func (db *DB) NewIterator() driver.IIterator { - it := new(Iterator) - - it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt) - - return it -} - -func (db *DB) NewSnapshot() (driver.ISnapshot, error) { - snap := &Snapshot{ - db: db, - snap: C.rocksdb_create_snapshot(db.db), - readOpts: NewReadOptions(), - iteratorOpts: NewReadOptions(), - } - snap.readOpts.SetSnapshot(snap) - snap.iteratorOpts.SetSnapshot(snap) - snap.iteratorOpts.SetFillCache(false) - - return snap, nil -} - -func (db *DB) put(wo *WriteOptions, key, value []byte) error { - var errStr *C.char - var k, v *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - if len(value) != 0 { - v = (*C.char)(unsafe.Pointer(&value[0])) - } - - lenk := len(key) - lenv := len(value) - C.rocksdb_put( - db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr) - - if errStr != nil { - return saveError(errStr) - } - return nil -} - -func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) { - var errStr *C.char - var vallen C.size_t - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - value := C.rocksdb_get( - db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) - - if errStr != nil { - return nil, saveError(errStr) - } - - if value == nil { - return nil, nil - } - - defer C.free(unsafe.Pointer(value)) - return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil -} - -func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) { - var errStr *C.char - var vallen C.size_t - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - value := C.rocksdb_get( - db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) - - if errStr != nil { - return nil, saveError(errStr) - } - - if value == nil 
{ - return nil, nil - } - - return NewCSlice(unsafe.Pointer(value), int(vallen)), nil -} - -func (db *DB) delete(wo *WriteOptions, key []byte) error { - var errStr *C.char - var k *C.char - if len(key) != 0 { - k = (*C.char)(unsafe.Pointer(&key[0])) - } - - C.rocksdb_delete( - db.db, wo.Opt, k, C.size_t(len(key)), &errStr) - - if errStr != nil { - return saveError(errStr) - } - return nil -} - -func (db *DB) Compact() error { - C.rocksdb_compact_range(db.db, nil, 0, nil, 0) - return nil -} - -func (db *DB) GetSlice(key []byte) (driver.ISlice, error) { - return db.getSlice(db.readOpts, key) -} - -func init() { - driver.Register(Store{}) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go deleted file mode 100644 index e239c1b6c0e8..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include "rocksdb/c.h" -import "C" - -type Env struct { - Env *C.rocksdb_env_t -} - -func NewDefaultEnv() *Env { - return &Env{C.rocksdb_create_default_env()} -} - -func (env *Env) SetHighPriorityBackgroundThreads(n int) { - C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n)) -} - -func (env *Env) SetBackgroundThreads(n int) { - C.rocksdb_env_set_background_threads(env.Env, C.int(n)) -} - -func (env *Env) Close() { - C.rocksdb_env_destroy(env.Env) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go deleted file mode 100644 index 3be4ef6acac7..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include -// #include "rocksdb/c.h" -import "C" - -type FilterPolicy struct { - Policy *C.rocksdb_filterpolicy_t -} - -func NewBloomFilter(bitsPerKey int) *FilterPolicy { - policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey)) - return &FilterPolicy{policy} -} - -func (fp *FilterPolicy) Close() { - C.rocksdb_filterpolicy_destroy(fp.Policy) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go deleted file mode 100644 index 046c5e9dfaa4..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include -// #include "rocksdb/c.h" -// #include "rocksdb_ext.h" -import "C" - -import ( - "unsafe" -) - -type Iterator struct { - it *C.rocksdb_iterator_t - isValid C.uchar -} - -func (it *Iterator) Key() []byte { - var klen C.size_t - kdata := C.rocksdb_iter_key(it.it, &klen) - if kdata == nil { - return nil - } - - return slice(unsafe.Pointer(kdata), int(C.int(klen))) -} - -func (it *Iterator) Value() []byte { - var vlen C.size_t - vdata := C.rocksdb_iter_value(it.it, &vlen) - if vdata == nil { - return nil - } - - return slice(unsafe.Pointer(vdata), int(C.int(vlen))) -} - -func (it *Iterator) Close() error { - if it.it != nil { - C.rocksdb_iter_destroy(it.it) - it.it = nil - } - return nil -} - -func (it *Iterator) Valid() bool { - return ucharToBool(it.isValid) -} - -func (it *Iterator) Next() { - it.isValid = C.rocksdb_iter_next_ext(it.it) -} - -func (it *Iterator) Prev() { - it.isValid = C.rocksdb_iter_prev_ext(it.it) -} - -func (it *Iterator) 
First() { - it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it) -} - -func (it *Iterator) Last() { - it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it) -} - -func (it *Iterator) Seek(key []byte) { - it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go deleted file mode 100644 index 48ca2301a49a..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go +++ /dev/null @@ -1,229 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include "rocksdb/c.h" -import "C" - -type CompressionOpt int - -const ( - NoCompression = CompressionOpt(0) - SnappyCompression = CompressionOpt(1) - ZlibCompression = CompressionOpt(2) - Bz2Compression = CompressionOpt(3) - Lz4Compression = CompressionOpt(4) - Lz4hcCompression = CompressionOpt(5) -) - -type Options struct { - Opt *C.rocksdb_options_t -} - -type ReadOptions struct { - Opt *C.rocksdb_readoptions_t -} - -type WriteOptions struct { - Opt *C.rocksdb_writeoptions_t -} - -type BlockBasedTableOptions struct { - Opt *C.rocksdb_block_based_table_options_t -} - -func NewOptions() *Options { - opt := C.rocksdb_options_create() - return &Options{opt} -} - -func NewReadOptions() *ReadOptions { - opt := C.rocksdb_readoptions_create() - return &ReadOptions{opt} -} - -func NewWriteOptions() *WriteOptions { - opt := C.rocksdb_writeoptions_create() - return &WriteOptions{opt} -} - -func NewBlockBasedTableOptions() *BlockBasedTableOptions { - opt := C.rocksdb_block_based_options_create() - return &BlockBasedTableOptions{opt} -} - -func (o *Options) Close() { - C.rocksdb_options_destroy(o.Opt) -} - -func (o *Options) IncreaseParallelism(n int) { - C.rocksdb_options_increase_parallelism(o.Opt, C.int(n)) -} - -func (o *Options) OptimizeLevelStyleCompaction(n int) { - C.rocksdb_options_optimize_level_style_compaction(o.Opt, C.uint64_t(n)) -} - -func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) { - C.rocksdb_options_set_comparator(o.Opt, cmp) -} - -func (o *Options) SetErrorIfExists(error_if_exists bool) { - eie := boolToUchar(error_if_exists) - C.rocksdb_options_set_error_if_exists(o.Opt, eie) -} - -func (o *Options) SetEnv(env *Env) { - C.rocksdb_options_set_env(o.Opt, env.Env) -} - -func (o *Options) SetWriteBufferSize(s int) { - C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s)) -} - -func (o *Options) SetParanoidChecks(pc bool) { - C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc)) -} - -func (o *Options) SetMaxOpenFiles(n int) { - C.rocksdb_options_set_max_open_files(o.Opt, C.int(n)) -} - -func (o *Options) SetCompression(t CompressionOpt) { - C.rocksdb_options_set_compression(o.Opt, C.int(t)) -} - -func (o *Options) SetCreateIfMissing(b bool) { - C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b)) -} - -func (o *Options) SetMaxWriteBufferNumber(n int) { - C.rocksdb_options_set_max_write_buffer_number(o.Opt, C.int(n)) -} - -func (o *Options) SetMaxBackgroundCompactions(n int) { - C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n)) -} - -func (o *Options) SetMaxBackgroundFlushes(n int) { - C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n)) -} - -func (o *Options) SetNumLevels(n int) { - C.rocksdb_options_set_num_levels(o.Opt, C.int(n)) -} - -func (o *Options) SetLevel0FileNumCompactionTrigger(n int) { - 
C.rocksdb_options_set_level0_file_num_compaction_trigger(o.Opt, C.int(n)) -} - -func (o *Options) SetLevel0SlowdownWritesTrigger(n int) { - C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n)) -} - -func (o *Options) SetLevel0StopWritesTrigger(n int) { - C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n)) -} - -func (o *Options) SetTargetFileSizeBase(n int) { - C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n))) -} - -func (o *Options) SetTargetFileSizeMultiplier(n int) { - C.rocksdb_options_set_target_file_size_multiplier(o.Opt, C.int(n)) -} - -func (o *Options) SetMaxBytesForLevelBase(n int) { - C.rocksdb_options_set_max_bytes_for_level_base(o.Opt, C.uint64_t(uint64(n))) -} - -func (o *Options) SetMaxBytesForLevelMultiplier(n int) { - C.rocksdb_options_set_max_bytes_for_level_multiplier(o.Opt, C.double(n)) -} - -func (o *Options) SetBlockBasedTableFactory(opt *BlockBasedTableOptions) { - C.rocksdb_options_set_block_based_table_factory(o.Opt, opt.Opt) -} - -func (o *Options) SetMinWriteBufferNumberToMerge(n int) { - C.rocksdb_options_set_min_write_buffer_number_to_merge(o.Opt, C.int(n)) -} - -func (o *Options) DisableAutoCompactions(b bool) { - C.rocksdb_options_set_disable_auto_compactions(o.Opt, boolToInt(b)) -} - -func (o *Options) UseFsync(b bool) { - C.rocksdb_options_set_use_fsync(o.Opt, boolToInt(b)) -} - -func (o *Options) EnableStatistics(b bool) { - if b { - C.rocksdb_options_enable_statistics(o.Opt) - } -} - -func (o *Options) SetStatsDumpPeriodSec(n int) { - C.rocksdb_options_set_stats_dump_period_sec(o.Opt, C.uint(n)) -} - -func (o *Options) SetMaxManifestFileSize(n int) { - C.rocksdb_options_set_max_manifest_file_size(o.Opt, C.size_t(n)) -} - -func (o *BlockBasedTableOptions) Close() { - C.rocksdb_block_based_options_destroy(o.Opt) -} - -func (o *BlockBasedTableOptions) SetFilterPolicy(fp *FilterPolicy) { - var policy *C.rocksdb_filterpolicy_t - if fp != nil { - policy = fp.Policy - } - C.rocksdb_block_based_options_set_filter_policy(o.Opt, policy) -} - -func (o *BlockBasedTableOptions) SetBlockSize(s int) { - C.rocksdb_block_based_options_set_block_size(o.Opt, C.size_t(s)) -} - -func (o *BlockBasedTableOptions) SetBlockRestartInterval(n int) { - C.rocksdb_block_based_options_set_block_restart_interval(o.Opt, C.int(n)) -} - -func (o *BlockBasedTableOptions) SetCache(cache *Cache) { - C.rocksdb_block_based_options_set_block_cache(o.Opt, cache.Cache) -} - -func (ro *ReadOptions) Close() { - C.rocksdb_readoptions_destroy(ro.Opt) -} - -func (ro *ReadOptions) SetVerifyChecksums(b bool) { - C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b)) -} - -func (ro *ReadOptions) SetFillCache(b bool) { - C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b)) -} - -func (ro *ReadOptions) SetSnapshot(snap *Snapshot) { - var s *C.rocksdb_snapshot_t - if snap != nil { - s = snap.snap - } - C.rocksdb_readoptions_set_snapshot(ro.Opt, s) -} - -func (wo *WriteOptions) Close() { - C.rocksdb_writeoptions_destroy(wo.Opt) -} - -func (wo *WriteOptions) SetSync(b bool) { - C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b)) -} - -func (wo *WriteOptions) DisableWAL(b bool) { - C.rocksdb_writeoptions_disable_WAL(wo.Opt, boolToInt(b)) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc deleted file mode 100644 index 39036ab96cc2..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc +++ /dev/null @@ 
-1,44 +0,0 @@ -// +build rocksdb - -#include "rocksdb_ext.h" - -#include -#include - -extern "C" { - -unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) { - rocksdb_iter_seek_to_first(iter); - return rocksdb_iter_valid(iter); -} - -unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) { - rocksdb_iter_seek_to_last(iter); - return rocksdb_iter_valid(iter); -} - -unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) { - rocksdb_iter_seek(iter, k, klen); - return rocksdb_iter_valid(iter); -} - -unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) { - rocksdb_iter_next(iter); - return rocksdb_iter_valid(iter); -} - -unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) { - rocksdb_iter_prev(iter); - return rocksdb_iter_valid(iter); -} - -void rocksdb_write_ext(rocksdb_t* db, - const rocksdb_writeoptions_t* options, - rocksdb_writebatch_t* batch, char** errptr) { - rocksdb_write(db, options, batch, errptr); - if(*errptr == NULL) { - rocksdb_writebatch_clear(batch); - } -} - -} \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h b/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h deleted file mode 100644 index 11cb65304105..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h +++ /dev/null @@ -1,24 +0,0 @@ -// +build rocksdb - -#ifndef ROCKSDB_EXT_H -#define ROCKSDB_EXT_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include "rocksdb/c.h" - -// Below iterator functions like rocksdb iterator but returns valid status for iterator -extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*); -extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*); -extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen); -extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*); -extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*); -extern void rocksdb_write_ext(rocksdb_t* db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, char** errptr); - -#ifdef __cplusplus -} -#endif - -#endif \ No newline at end of file diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go deleted file mode 100644 index bbaa65bd7652..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go +++ /dev/null @@ -1,41 +0,0 @@ -//+build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include -// #include -import "C" - -import ( - "reflect" - "unsafe" -) - -type CSlice struct { - data unsafe.Pointer - size int -} - -func NewCSlice(p unsafe.Pointer, n int) *CSlice { - return &CSlice{p, n} -} - -func (s *CSlice) Data() []byte { - var value []byte - - sH := (*reflect.SliceHeader)(unsafe.Pointer(&value)) - sH.Cap = int(s.size) - sH.Len = int(s.size) - sH.Data = uintptr(s.data) - - return value -} - -func (s *CSlice) Size() int { - return int(s.size) -} - -func (s *CSlice) Free() { - C.free(s.data) -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go deleted file mode 100644 index 1ced60020027..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #cgo LDFLAGS: -lrocksdb -// #include "rocksdb/c.h" -import "C" - -import ( - 
"github.com/siddontang/ledisdb/store/driver" -) - -type Snapshot struct { - db *DB - snap *C.rocksdb_snapshot_t - readOpts *ReadOptions - iteratorOpts *ReadOptions -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - return s.db.get(s.readOpts, key) -} - -func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) { - return s.db.getSlice(s.readOpts, key) -} - -func (s *Snapshot) NewIterator() driver.IIterator { - it := new(Iterator) - it.it = C.rocksdb_create_iterator(s.db.db, s.db.iteratorOpts.Opt) - return it - -} - -func (s *Snapshot) Close() { - C.rocksdb_release_snapshot(s.db.db, s.snap) - s.iteratorOpts.Close() - s.readOpts.Close() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go b/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go deleted file mode 100644 index 22b73baf4ad4..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build rocksdb - -package rocksdb - -// #include -// #include "rocksdb/c.h" -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -func boolToUchar(b bool) C.uchar { - uc := C.uchar(0) - if b { - uc = C.uchar(1) - } - return uc -} - -func ucharToBool(uc C.uchar) bool { - if uc == C.uchar(0) { - return false - } - return true -} - -func boolToInt(b bool) C.int { - uc := C.int(0) - if b { - uc = C.int(1) - } - return uc -} - -func saveError(errStr *C.char) error { - if errStr != nil { - gs := C.GoString(errStr) - C.free(unsafe.Pointer(errStr)) - return fmt.Errorf(gs) - } - return nil -} - -func slice(p unsafe.Pointer, n int) []byte { - var b []byte - pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - pbyte.Data = uintptr(p) - pbyte.Len = n - pbyte.Cap = n - return b -} diff --git a/vendor/github.com/siddontang/ledisdb/store/slice.go b/vendor/github.com/siddontang/ledisdb/store/slice.go deleted file mode 100644 index b027f4f28b8e..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/slice.go +++ /dev/null @@ -1,9 +0,0 @@ -package store - -import ( - "github.com/siddontang/ledisdb/store/driver" -) - -type Slice interface { - driver.ISlice -} diff --git a/vendor/github.com/siddontang/ledisdb/store/snapshot.go b/vendor/github.com/siddontang/ledisdb/store/snapshot.go deleted file mode 100644 index a1c9de9944e6..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/snapshot.go +++ /dev/null @@ -1,48 +0,0 @@ -package store - -import ( - "github.com/siddontang/ledisdb/store/driver" -) - -type Snapshot struct { - driver.ISnapshot - st *Stat -} - -func (s *Snapshot) NewIterator() *Iterator { - it := new(Iterator) - it.it = s.ISnapshot.NewIterator() - it.st = s.st - - s.st.IterNum.Add(1) - - return it -} - -func (s *Snapshot) Get(key []byte) ([]byte, error) { - v, err := s.ISnapshot.Get(key) - s.st.statGet(v, err) - return v, err -} - -func (s *Snapshot) GetSlice(key []byte) (Slice, error) { - if d, ok := s.ISnapshot.(driver.ISliceGeter); ok { - v, err := d.GetSlice(key) - s.st.statGet(v, err) - return v, err - } else { - v, err := s.Get(key) - if err != nil { - return nil, err - } else if v == nil { - return nil, nil - } else { - return driver.GoSlice(v), nil - } - } -} - -func (s *Snapshot) Close() { - s.st.SnapshotCloseNum.Add(1) - s.ISnapshot.Close() -} diff --git a/vendor/github.com/siddontang/ledisdb/store/stat.go b/vendor/github.com/siddontang/ledisdb/store/stat.go deleted file mode 100644 index e0a035ab8e18..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/stat.go +++ /dev/null @@ -1,37 +0,0 @@ -package store - -import ( - 
"github.com/siddontang/go/sync2" -) - -type Stat struct { - GetNum sync2.AtomicInt64 - GetMissingNum sync2.AtomicInt64 - GetTotalTime sync2.AtomicDuration - PutNum sync2.AtomicInt64 - DeleteNum sync2.AtomicInt64 - IterNum sync2.AtomicInt64 - IterSeekNum sync2.AtomicInt64 - IterCloseNum sync2.AtomicInt64 - SnapshotNum sync2.AtomicInt64 - SnapshotCloseNum sync2.AtomicInt64 - BatchNum sync2.AtomicInt64 - BatchCommitNum sync2.AtomicInt64 - BatchCommitTotalTime sync2.AtomicDuration - TxNum sync2.AtomicInt64 - TxCommitNum sync2.AtomicInt64 - TxCloseNum sync2.AtomicInt64 - CompactNum sync2.AtomicInt64 - CompactTotalTime sync2.AtomicDuration -} - -func (st *Stat) statGet(v interface{}, err error) { - st.GetNum.Add(1) - if v == nil && err == nil { - st.GetMissingNum.Add(1) - } -} - -func (st *Stat) Reset() { - *st = Stat{} -} diff --git a/vendor/github.com/siddontang/ledisdb/store/store.go b/vendor/github.com/siddontang/ledisdb/store/store.go deleted file mode 100644 index 1352491254dc..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/store.go +++ /dev/null @@ -1,62 +0,0 @@ -package store - -import ( - "fmt" - "os" - "path" - - "github.com/siddontang/ledisdb/config" - "github.com/siddontang/ledisdb/store/driver" - - _ "github.com/siddontang/ledisdb/store/goleveldb" - _ "github.com/siddontang/ledisdb/store/leveldb" - _ "github.com/siddontang/ledisdb/store/rocksdb" -) - -func getStorePath(cfg *config.Config) string { - if len(cfg.DBPath) > 0 { - return cfg.DBPath - } else { - return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName)) - } -} - -func Open(cfg *config.Config) (*DB, error) { - s, err := driver.GetStore(cfg) - if err != nil { - return nil, err - } - - path := getStorePath(cfg) - - if err := os.MkdirAll(path, 0755); err != nil { - return nil, err - } - - idb, err := s.Open(path, cfg) - if err != nil { - return nil, err - } - - db := new(DB) - db.db = idb - db.name = s.String() - db.st = &Stat{} - db.cfg = cfg - - return db, nil -} - -func Repair(cfg *config.Config) error { - s, err := driver.GetStore(cfg) - if err != nil { - return err - } - - path := getStorePath(cfg) - - return s.Repair(path, cfg) -} - -func init() { -} diff --git a/vendor/github.com/siddontang/ledisdb/store/writebatch.go b/vendor/github.com/siddontang/ledisdb/store/writebatch.go deleted file mode 100644 index 73760d719c3c..000000000000 --- a/vendor/github.com/siddontang/ledisdb/store/writebatch.go +++ /dev/null @@ -1,136 +0,0 @@ -package store - -import ( - "time" - - "github.com/siddontang/ledisdb/store/driver" - "github.com/syndtr/goleveldb/leveldb" -) - -type WriteBatch struct { - wb driver.IWriteBatch - st *Stat - - putNum int64 - deleteNum int64 - db *DB - - data *BatchData -} - -func (wb *WriteBatch) Close() { - wb.wb.Close() -} - -func (wb *WriteBatch) Put(key []byte, value []byte) { - wb.putNum++ - wb.wb.Put(key, value) -} - -func (wb *WriteBatch) Delete(key []byte) { - wb.deleteNum++ - wb.wb.Delete(key) -} - -func (wb *WriteBatch) Commit() error { - wb.st.BatchCommitNum.Add(1) - wb.st.PutNum.Add(wb.putNum) - wb.st.DeleteNum.Add(wb.deleteNum) - wb.putNum = 0 - wb.deleteNum = 0 - - var err error - t := time.Now() - if wb.db == nil || !wb.db.needSyncCommit() { - err = wb.wb.Commit() - } else { - err = wb.wb.SyncCommit() - } - - wb.st.BatchCommitTotalTime.Add(time.Now().Sub(t)) - - return err -} - -func (wb *WriteBatch) Rollback() error { - wb.putNum = 0 - wb.deleteNum = 0 - - return wb.wb.Rollback() -} - -// the data will be undefined after commit or rollback -func (wb *WriteBatch) BatchData() 
*BatchData { - data := wb.wb.Data() - if wb.data == nil { - wb.data = new(BatchData) - } - - wb.data.Load(data) - return wb.data -} - -func (wb *WriteBatch) Data() []byte { - b := wb.BatchData() - return b.Data() -} - -/* - see leveldb batch data format for more information -*/ - -type BatchData struct { - leveldb.Batch -} - -func NewBatchData(data []byte) (*BatchData, error) { - b := new(BatchData) - - if err := b.Load(data); err != nil { - return nil, err - } - - return b, nil -} - -func (d *BatchData) Data() []byte { - return d.Dump() -} - -func (d *BatchData) Reset() { - d.Batch.Reset() -} - -type BatchDataReplay interface { - Put(key, value []byte) - Delete(key []byte) -} - -type BatchItem struct { - Key []byte - Value []byte -} - -type batchItems []BatchItem - -func (bs *batchItems) Put(key, value []byte) { - *bs = append(*bs, BatchItem{key, value}) -} - -func (bs *batchItems) Delete(key []byte) { - *bs = append(*bs, BatchItem{key, nil}) -} - -func (d *BatchData) Replay(r BatchDataReplay) error { - return d.Batch.Replay(r) -} - -func (d *BatchData) Items() ([]BatchItem, error) { - is := make(batchItems, 0, d.Len()) - - if err := d.Replay(&is); err != nil { - return nil, err - } - - return []BatchItem(is), nil -} diff --git a/vendor/github.com/siddontang/rdb/LICENSE b/vendor/github.com/siddontang/rdb/LICENSE deleted file mode 100644 index c16e3affbd3d..000000000000 --- a/vendor/github.com/siddontang/rdb/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 siddontang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/siddontang/rdb/decode.go b/vendor/github.com/siddontang/rdb/decode.go deleted file mode 100644 index 865d24198120..000000000000 --- a/vendor/github.com/siddontang/rdb/decode.go +++ /dev/null @@ -1,128 +0,0 @@ -package rdb - -// Copyright 2014 Wandoujia Inc. All Rights Reserved. -// Licensed under the MIT (MIT-LICENSE.txt) license. 
- -import "fmt" - -import ( - "github.com/cupcake/rdb" - "github.com/cupcake/rdb/nopdecoder" -) - -func DecodeDump(p []byte) (interface{}, error) { - d := &decoder{} - if err := rdb.DecodeDump(p, 0, nil, 0, d); err != nil { - return nil, err - } - return d.obj, d.err -} - -type decoder struct { - nopdecoder.NopDecoder - obj interface{} - err error -} - -func (d *decoder) initObject(obj interface{}) { - if d.err != nil { - return - } - if d.obj != nil { - d.err = fmt.Errorf("invalid object, init again") - } else { - d.obj = obj - } -} - -func (d *decoder) Set(key, value []byte, expiry int64) { - d.initObject(String(value)) -} - -func (d *decoder) StartHash(key []byte, length, expiry int64) { - d.initObject(Hash(nil)) -} - -func (d *decoder) Hset(key, field, value []byte) { - if d.err != nil { - return - } - switch h := d.obj.(type) { - default: - d.err = fmt.Errorf("invalid object, not a hashmap") - case Hash: - v := struct { - Field, Value []byte - }{ - field, - value, - } - d.obj = append(h, v) - } -} - -func (d *decoder) StartSet(key []byte, cardinality, expiry int64) { - d.initObject(Set(nil)) -} - -func (d *decoder) Sadd(key, member []byte) { - if d.err != nil { - return - } - switch s := d.obj.(type) { - default: - d.err = fmt.Errorf("invalid object, not a set") - case Set: - d.obj = append(s, member) - } -} - -func (d *decoder) StartList(key []byte, length, expiry int64) { - d.initObject(List(nil)) -} - -func (d *decoder) Rpush(key, value []byte) { - if d.err != nil { - return - } - switch l := d.obj.(type) { - default: - d.err = fmt.Errorf("invalid object, not a list") - case List: - d.obj = append(l, value) - } -} - -func (d *decoder) StartZSet(key []byte, cardinality, expiry int64) { - d.initObject(ZSet(nil)) -} - -func (d *decoder) Zadd(key []byte, score float64, member []byte) { - if d.err != nil { - return - } - switch z := d.obj.(type) { - default: - d.err = fmt.Errorf("invalid object, not a zset") - case ZSet: - v := struct { - Member []byte - Score float64 - }{ - member, - score, - } - d.obj = append(z, v) - } -} - -type String []byte -type List [][]byte -type Hash []struct { - Field, Value []byte -} -type Set [][]byte -type ZSet []struct { - Member []byte - Score float64 -} diff --git a/vendor/github.com/siddontang/rdb/digest.go b/vendor/github.com/siddontang/rdb/digest.go deleted file mode 100644 index b59e4dfb7d23..000000000000 --- a/vendor/github.com/siddontang/rdb/digest.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 Wandoujia Inc. All Rights Reserved. -// Licensed under the MIT (MIT-LICENSE.txt) license. 
- -package rdb - -import ( - "encoding/binary" - "hash" -) - -var crc64_table = [256]uint64{ - 0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b, - 0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04, - 0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe, - 0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371, - 0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a, - 0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285, - 0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f, - 0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0, - 0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489, - 0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206, - 0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc, - 0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73, - 0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08, - 0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87, - 0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d, - 0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2, - 0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4, - 0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b, - 0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891, - 0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e, - 0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965, - 0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea, - 0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710, - 0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f, - 0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6, - 0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69, - 0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793, - 0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c, - 0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667, - 0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8, - 0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812, - 0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d, - 0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e, - 0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1, - 0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b, - 0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4, - 0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf, - 0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30, - 0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca, - 0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545, - 0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c, - 0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3, - 
0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349, - 0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6, - 0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd, - 0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432, - 0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8, - 0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47, - 0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51, - 0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de, - 0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124, - 0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab, - 0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0, - 0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f, - 0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5, - 0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a, - 0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053, - 0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc, - 0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26, - 0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9, - 0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2, - 0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d, - 0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7, - 0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728} - -type digest struct { - crc uint64 -} - -func (d *digest) update(p []byte) { - for _, b := range p { - d.crc = crc64_table[byte(d.crc)^b] ^ (d.crc >> 8) - } -} - -func newDigest() hash.Hash64 { - d := &digest{} - return d -} - -func (d *digest) Write(p []byte) (int, error) { - d.update(p) - return len(p), nil -} - -func (d *digest) Sum(in []byte) []byte { - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, d.crc) - return append(in, buf...) 
-} - -func (d *digest) Sum64() uint64 { return d.crc } -func (d *digest) BlockSize() int { return 1 } -func (d *digest) Size() int { return 8 } -func (d *digest) Reset() { d.crc = 0 } diff --git a/vendor/github.com/siddontang/rdb/encode.go b/vendor/github.com/siddontang/rdb/encode.go deleted file mode 100644 index 19c48c331fab..000000000000 --- a/vendor/github.com/siddontang/rdb/encode.go +++ /dev/null @@ -1,52 +0,0 @@ -package rdb - -import ( - "bytes" - "fmt" - "github.com/cupcake/rdb" -) - -func Dump(obj interface{}) ([]byte, error) { - var buf bytes.Buffer - - e := rdb.NewEncoder(&buf) - - switch v := obj.(type) { - case String: - e.EncodeType(rdb.TypeString) - e.EncodeString(v) - case Hash: - e.EncodeType(rdb.TypeHash) - e.EncodeLength(uint32(len(v))) - - for i := 0; i < len(v); i++ { - e.EncodeString(v[i].Field) - e.EncodeString(v[i].Value) - } - case List: - e.EncodeType(rdb.TypeList) - e.EncodeLength(uint32(len(v))) - for i := 0; i < len(v); i++ { - e.EncodeString(v[i]) - } - case Set: - e.EncodeType(rdb.TypeSet) - e.EncodeLength(uint32(len(v))) - for i := 0; i < len(v); i++ { - e.EncodeString(v[i]) - } - case ZSet: - e.EncodeType(rdb.TypeZSet) - e.EncodeLength(uint32(len(v))) - for i := 0; i < len(v); i++ { - e.EncodeString(v[i].Member) - e.EncodeFloat(v[i].Score) - } - default: - return nil, fmt.Errorf("invalid dump type %T", obj) - } - - e.EncodeDumpFooter() - - return buf.Bytes(), nil -} diff --git a/vendor/github.com/siddontang/rdb/loader.go b/vendor/github.com/siddontang/rdb/loader.go deleted file mode 100644 index 22743cbdebda..000000000000 --- a/vendor/github.com/siddontang/rdb/loader.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 Wandoujia Inc. All Rights Reserved. -// Licensed under the MIT (MIT-LICENSE.txt) license. - -package rdb - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash" - "io" - "strconv" -) - -type Loader struct { - *rdbReader - crc hash.Hash64 - db uint32 -} - -func NewLoader(r io.Reader) *Loader { - l := &Loader{} - l.crc = newDigest() - l.rdbReader = newRdbReader(io.TeeReader(r, l.crc)) - return l -} - -func (l *Loader) LoadHeader() error { - header := make([]byte, 9) - if err := l.readFull(header); err != nil { - return err - } - if !bytes.Equal(header[:5], []byte("REDIS")) { - return fmt.Errorf("verify magic string, invalid file format") - } - if version, err := strconv.ParseInt(string(header[5:]), 10, 64); err != nil { - return err - } else if version <= 0 || version > Version { - return fmt.Errorf("verify version, invalid RDB version number %d", version) - } - return nil -} - -func (l *Loader) LoadChecksum() error { - crc1 := l.crc.Sum64() - if crc2, err := l.readUint64(); err != nil { - return err - } else if crc1 != crc2 { - return fmt.Errorf("checksum validation failed") - } - return nil -} - -type Entry struct { - DB uint32 - Key []byte - ValDump []byte - ExpireAt uint64 -} - -func (l *Loader) LoadEntry() (entry *Entry, err error) { - var expireat uint64 - for { - var otype byte - if otype, err = l.readByte(); err != nil { - return - } - switch otype { - case rdbFlagExpiryMS: - if expireat, err = l.readUint64(); err != nil { - return - } - case rdbFlagExpiry: - var sec uint32 - if sec, err = l.readUint32(); err != nil { - return - } - expireat = uint64(sec) * 1000 - case rdbFlagSelectDB: - if l.db, err = l.readLength(); err != nil { - return - } - case rdbFlagEOF: - return - default: - var key, obj []byte - if key, err = l.readString(); err != nil { - return - } - if obj, err = l.readObject(otype); err != nil { - return - } - entry = 
&Entry{} - entry.DB = l.db - entry.Key = key - entry.ValDump = createValDump(otype, obj) - entry.ExpireAt = expireat - return - } - } -} - -func createValDump(otype byte, obj []byte) []byte { - var b bytes.Buffer - c := newDigest() - w := io.MultiWriter(&b, c) - w.Write([]byte{otype}) - w.Write(obj) - binary.Write(w, binary.LittleEndian, uint16(Version)) - binary.Write(w, binary.LittleEndian, c.Sum64()) - return b.Bytes() -} diff --git a/vendor/github.com/siddontang/rdb/reader.go b/vendor/github.com/siddontang/rdb/reader.go deleted file mode 100644 index 89ae9ed18121..000000000000 --- a/vendor/github.com/siddontang/rdb/reader.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2014 Wandoujia Inc. All Rights Reserved. -// Licensed under the MIT (MIT-LICENSE.txt) license. - -package rdb - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" - "strconv" -) - -const ( - Version = 6 -) - -const ( - rdbTypeString = 0 - rdbTypeList = 1 - rdbTypeSet = 2 - rdbTypeZSet = 3 - rdbTypeHash = 4 - - rdbTypeHashZipmap = 9 - rdbTypeListZiplist = 10 - rdbTypeSetIntset = 11 - rdbTypeZSetZiplist = 12 - rdbTypeHashZiplist = 13 - - rdbFlagExpiryMS = 0xfc - rdbFlagExpiry = 0xfd - rdbFlagSelectDB = 0xfe - rdbFlagEOF = 0xff -) - -const ( - rdb6bitLen = 0 - rdb14bitLen = 1 - rdb32bitLen = 2 - rdbEncVal = 3 - - rdbEncInt8 = 0 - rdbEncInt16 = 1 - rdbEncInt32 = 2 - rdbEncLZF = 3 - - rdbZiplist6bitlenString = 0 - rdbZiplist14bitlenString = 1 - rdbZiplist32bitlenString = 2 - - rdbZiplistInt16 = 0xc0 - rdbZiplistInt32 = 0xd0 - rdbZiplistInt64 = 0xe0 - rdbZiplistInt24 = 0xf0 - rdbZiplistInt8 = 0xfe - rdbZiplistInt4 = 15 -) - -type rdbReader struct { - raw io.Reader - buf [8]byte - nread int64 -} - -func newRdbReader(r io.Reader) *rdbReader { - return &rdbReader{raw: r} -} - -func (r *rdbReader) Read(p []byte) (int, error) { - n, err := r.raw.Read(p) - r.nread += int64(n) - return n, err -} - -func (r *rdbReader) offset() int64 { - return r.nread -} - -func (r *rdbReader) readObject(otype byte) ([]byte, error) { - var b bytes.Buffer - r = newRdbReader(io.TeeReader(r, &b)) - switch otype { - default: - return nil, fmt.Errorf("unknown object-type %02x", otype) - case rdbTypeHashZipmap: - fallthrough - case rdbTypeListZiplist: - fallthrough - case rdbTypeSetIntset: - fallthrough - case rdbTypeZSetZiplist: - fallthrough - case rdbTypeHashZiplist: - fallthrough - case rdbTypeString: - if _, err := r.readString(); err != nil { - return nil, err - } - case rdbTypeList, rdbTypeSet: - if n, err := r.readLength(); err != nil { - return nil, err - } else { - for i := 0; i < int(n); i++ { - if _, err := r.readString(); err != nil { - return nil, err - } - } - } - case rdbTypeZSet: - if n, err := r.readLength(); err != nil { - return nil, err - } else { - for i := 0; i < int(n); i++ { - if _, err := r.readString(); err != nil { - return nil, err - } - if _, err := r.readFloat(); err != nil { - return nil, err - } - } - } - case rdbTypeHash: - if n, err := r.readLength(); err != nil { - return nil, err - } else { - for i := 0; i < int(n); i++ { - if _, err := r.readString(); err != nil { - return nil, err - } - if _, err := r.readString(); err != nil { - return nil, err - } - } - } - } - return b.Bytes(), nil -} - -func (r *rdbReader) readString() ([]byte, error) { - length, encoded, err := r.readEncodedLength() - if err != nil { - return nil, err - } - if !encoded { - return r.readBytes(int(length)) - } - switch t := uint8(length); t { - default: - return nil, fmt.Errorf("invalid encoded-string %02x", t) - case rdbEncInt8: - i, 
err := r.readInt8() - return []byte(strconv.FormatInt(int64(i), 10)), err - case rdbEncInt16: - i, err := r.readInt16() - return []byte(strconv.FormatInt(int64(i), 10)), err - case rdbEncInt32: - i, err := r.readInt32() - return []byte(strconv.FormatInt(int64(i), 10)), err - case rdbEncLZF: - var inlen, outlen uint32 - if inlen, err = r.readLength(); err != nil { - return nil, err - } - if outlen, err = r.readLength(); err != nil { - return nil, err - } - if in, err := r.readBytes(int(inlen)); err != nil { - return nil, err - } else { - return lzfDecompress(in, int(outlen)) - } - } -} - -func (r *rdbReader) readEncodedLength() (length uint32, encoded bool, err error) { - var u uint8 - if u, err = r.readUint8(); err != nil { - return - } - length = uint32(u & 0x3f) - switch u >> 6 { - case rdb6bitLen: - case rdb14bitLen: - u, err = r.readUint8() - length = (length << 8) + uint32(u) - case rdbEncVal: - encoded = true - default: - length, err = r.readUint32BigEndian() - } - return -} - -func (r *rdbReader) readLength() (uint32, error) { - length, encoded, err := r.readEncodedLength() - if err == nil && encoded { - err = fmt.Errorf("encoded-length") - } - return length, err -} - -func (r *rdbReader) readFloat() (float64, error) { - u, err := r.readUint8() - if err != nil { - return 0, err - } - switch u { - case 253: - return math.NaN(), nil - case 254: - return math.Inf(0), nil - case 255: - return math.Inf(-1), nil - default: - if b, err := r.readBytes(int(u)); err != nil { - return 0, err - } else { - v, err := strconv.ParseFloat(string(b), 64) - return v, err - } - } -} - -func (r *rdbReader) readByte() (byte, error) { - b := r.buf[:1] - _, err := r.Read(b) - return b[0], err -} - -func (r *rdbReader) readFull(p []byte) error { - _, err := io.ReadFull(r, p) - return err -} - -func (r *rdbReader) readBytes(n int) ([]byte, error) { - p := make([]byte, n) - return p, r.readFull(p) -} - -func (r *rdbReader) readUint8() (uint8, error) { - b, err := r.readByte() - return uint8(b), err -} - -func (r *rdbReader) readUint16() (uint16, error) { - b := r.buf[:2] - err := r.readFull(b) - return binary.LittleEndian.Uint16(b), err -} - -func (r *rdbReader) readUint32() (uint32, error) { - b := r.buf[:4] - err := r.readFull(b) - return binary.LittleEndian.Uint32(b), err -} - -func (r *rdbReader) readUint64() (uint64, error) { - b := r.buf[:8] - err := r.readFull(b) - return binary.LittleEndian.Uint64(b), err -} - -func (r *rdbReader) readUint32BigEndian() (uint32, error) { - b := r.buf[:4] - err := r.readFull(b) - return binary.BigEndian.Uint32(b), err -} - -func (r *rdbReader) readInt8() (int8, error) { - u, err := r.readUint8() - return int8(u), err -} - -func (r *rdbReader) readInt16() (int16, error) { - u, err := r.readUint16() - return int16(u), err -} - -func (r *rdbReader) readInt32() (int32, error) { - u, err := r.readUint32() - return int32(u), err -} - -func (r *rdbReader) readInt64() (int64, error) { - u, err := r.readUint64() - return int64(u), err -} - -func (r *rdbReader) readInt32BigEndian() (int32, error) { - u, err := r.readUint32BigEndian() - return int32(u), err -} - -func lzfDecompress(in []byte, outlen int) (out []byte, err error) { - defer func() { - if x := recover(); x != nil { - err = fmt.Errorf("decompress exception: %v", x) - } - }() - out = make([]byte, outlen) - i, o := 0, 0 - for i < len(in) { - ctrl := int(in[i]) - i++ - if ctrl < 32 { - for x := 0; x <= ctrl; x++ { - out[o] = in[i] - i++ - o++ - } - } else { - length := ctrl >> 5 - if length == 7 { - length = length + 
int(in[i]) - i++ - } - ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1 - i++ - for x := 0; x <= length+1; x++ { - out[o] = out[ref] - ref++ - o++ - } - } - } - if o != outlen { - return nil, fmt.Errorf("decompress length is %d != expected %d", o, outlen) - } - return out, nil -} From 9a442e694bf7ad466f167074c69cebd07d09f01a Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 17 Feb 2019 20:33:02 +0800 Subject: [PATCH 13/14] fix bug when initializing issue indexer --- models/issue_indexer.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/models/issue_indexer.go b/models/issue_indexer.go index 00b8558eaf09..d02b7164da02 100644 --- a/models/issue_indexer.go +++ b/models/issue_indexer.go @@ -21,6 +21,7 @@ var ( // InitIssueIndexer initialize issue indexer func InitIssueIndexer() error { + var populate bool switch setting.Indexer.IssueType { case "bleve": issueIndexer = issues.NewBleveIndexer(setting.Indexer.IssuePath) @@ -28,9 +29,7 @@ func InitIssueIndexer() error { if err != nil { return err } - if !exist { - go populateIssueIndexer() - } + populate = !exist default: return fmt.Errorf("unknown issue indexer type: %s", setting.Indexer.IssueType) } @@ -53,6 +52,10 @@ func InitIssueIndexer() error { go issueIndexerUpdateQueue.Run() + if populate { + go populateIssueIndexer() + } + return nil } From 2e897438b9643a9b453f80e9ed82a0e31879bf1c Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Mon, 18 Feb 2019 11:24:30 +0800 Subject: [PATCH 14/14] split indexer setting into a separate file --- Gopkg.toml | 2 +- modules/setting/indexer.go | 55 ++++++++++++++++++++++++++++++++++++++ modules/setting/setting.go | 42 +---------------------------- 3 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 modules/setting/indexer.go diff --git a/Gopkg.toml b/Gopkg.toml index ba29ce5a962c..94f15079ba93 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -112,4 +112,4 @@ ignored = ["google.golang.org/appengine*"] [[constraint]] name = "github.com/prometheus/client_golang" - version = "0.9.0" \ No newline at end of file + version = "0.9.0" diff --git a/modules/setting/indexer.go b/modules/setting/indexer.go new file mode 100644 index 000000000000..245ebb049675 --- /dev/null +++ b/modules/setting/indexer.go @@ -0,0 +1,55 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package setting + +import ( + "path" + "path/filepath" +) + +// enumerates all the indexer queue types +const ( + LevelQueueType = "levelqueue" + ChannelQueueType = "channel" +) + +var ( + // Indexer settings + Indexer = struct { + IssueType string + IssuePath string + RepoIndexerEnabled bool + RepoPath string + UpdateQueueLength int + MaxIndexerFileSize int64 + IssueIndexerQueueType string + IssueIndexerQueueDir string + IssueIndexerQueueBatchNumber int + }{ + IssueType: "bleve", + IssuePath: "indexers/issues.bleve", + IssueIndexerQueueType: LevelQueueType, + IssueIndexerQueueDir: "indexers/issues.queue", + IssueIndexerQueueBatchNumber: 20, + } +) + +func newIndexerService() { + sec := Cfg.Section("indexer") + Indexer.IssuePath = sec.Key("ISSUE_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/issues.bleve")) + if !filepath.IsAbs(Indexer.IssuePath) { + Indexer.IssuePath = path.Join(AppWorkPath, Indexer.IssuePath) + } + Indexer.RepoIndexerEnabled = sec.Key("REPO_INDEXER_ENABLED").MustBool(false) + Indexer.RepoPath = sec.Key("REPO_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/repos.bleve")) + if !filepath.IsAbs(Indexer.RepoPath) { + Indexer.RepoPath = path.Join(AppWorkPath, Indexer.RepoPath) + } + Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20) + Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024) + Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType) + Indexer.IssueIndexerQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue")) + Indexer.IssueIndexerQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20) +} diff --git a/modules/setting/setting.go b/modules/setting/setting.go index 3b19ee996408..d19d68553527 100644 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -83,12 +83,6 @@ const ( ReCaptcha = "recaptcha" ) -// enumerates all the indexer queue types -const ( - LevelQueueType = "levelqueue" - ChannelQueueType = "channel" -) - // settings var ( // AppVer settings @@ -185,25 +179,6 @@ var ( DBConnectRetries int DBConnectBackoff time.Duration - // Indexer settings - Indexer = struct { - IssueType string - IssuePath string - RepoIndexerEnabled bool - RepoPath string - UpdateQueueLength int - MaxIndexerFileSize int64 - IssueIndexerQueueType string - IssueIndexerQueueDir string - IssueIndexerQueueBatchNumber int - }{ - IssueType: "bleve", - IssuePath: "indexers/issues.bleve", - IssueIndexerQueueType: LevelQueueType, - IssueIndexerQueueDir: "indexers/issues.queue", - IssueIndexerQueueBatchNumber: 20, - } - // Repository settings Repository = struct { AnsiCharset string @@ -1232,22 +1207,6 @@ func NewContext() { // Explicitly disable credential helper, otherwise Git credentials might leak git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "credential.helper=") } - - sec = Cfg.Section("indexer") - Indexer.IssuePath = sec.Key("ISSUE_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/issues.bleve")) - if !filepath.IsAbs(Indexer.IssuePath) { - Indexer.IssuePath = path.Join(AppWorkPath, Indexer.IssuePath) - } - Indexer.RepoIndexerEnabled = sec.Key("REPO_INDEXER_ENABLED").MustBool(false) - Indexer.RepoPath = sec.Key("REPO_INDEXER_PATH").MustString(path.Join(AppDataPath, "indexers/repos.bleve")) - if !filepath.IsAbs(Indexer.RepoPath) { - Indexer.RepoPath = path.Join(AppWorkPath, Indexer.RepoPath) - } - Indexer.UpdateQueueLength = sec.Key("UPDATE_BUFFER_LEN").MustInt(20) - 
Indexer.MaxIndexerFileSize = sec.Key("MAX_FILE_SIZE").MustInt64(1024 * 1024) - Indexer.IssueIndexerQueueType = sec.Key("ISSUE_INDEXER_QUEUE_TYPE").MustString(LevelQueueType) - Indexer.IssueIndexerQueueDir = sec.Key("ISSUE_INDEXER_QUEUE_DIR").MustString(path.Join(AppDataPath, "indexers/issues.queue")) - Indexer.IssueIndexerQueueBatchNumber = sec.Key("ISSUE_INDEXER_QUEUE_BATCH_NUMBER").MustInt(20) } // NewServices initializes the services @@ -1261,4 +1220,5 @@ func NewServices() { newRegisterMailService() newNotifyMailService() newWebhookService() + newIndexerService() }
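For reference, newIndexerService() above reads everything from the [indexer] section of app.ini. A minimal sketch of that section, showing only the keys the function actually parses, with the default values it applies (relative paths are resolved against the data/work paths as in the code above; this illustrates the defaults, it adds no new behavior):

    [indexer]
    ISSUE_INDEXER_PATH = indexers/issues.bleve
    ; levelqueue (default, backed by ISSUE_INDEXER_QUEUE_DIR) or channel
    ISSUE_INDEXER_QUEUE_TYPE = levelqueue
    ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue
    ISSUE_INDEXER_QUEUE_BATCH_NUMBER = 20
    REPO_INDEXER_ENABLED = false
    REPO_INDEXER_PATH = indexers/repos.bleve
    UPDATE_BUFFER_LEN = 20
    ; 1024 * 1024 bytes
    MAX_FILE_SIZE = 1048576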